diff --git a/.lgtm.yml b/.lgtm.yml deleted file mode 100644 index 4a43aa25c57..00000000000 --- a/.lgtm.yml +++ /dev/null @@ -1,8 +0,0 @@ -extraction: - javascript: - index: - filters: - - exclude: "**/*.js" -queries: - - exclude: py/missing-equals - - exclude: py/import-and-import-from diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c5989869f8a..5a1af781d0e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,28 +1,28 @@ repos: - repo: https://github.com/psf/black - rev: 23.9.1 + rev: 23.10.1 hooks: - id: black args: [--quiet] # Ruff mne - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.292 + rev: v0.1.3 hooks: - id: ruff name: ruff mne - args: ["--fix", "--exit-non-zero-on-fix"] + args: ["--fix"] files: ^mne/ # Ruff tutorials and examples - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.292 + rev: v0.1.3 hooks: - id: ruff name: ruff tutorials and examples # D103: missing docstring in public function # D400: docstring first line must end with period - args: ["--ignore=D103,D400", "--fix", "--exit-non-zero-on-fix"] + args: ["--ignore=D103,D400", "--fix"] files: ^tutorials/|^examples/ # Codespell @@ -41,3 +41,13 @@ repos: hooks: - id: yamllint args: [--strict, -c, .yamllint.yml] + + # rstcheck + - repo: https://github.com/rstcheck/rstcheck.git + rev: v6.2.0 + hooks: + - id: rstcheck + files: ^doc/.*\.(rst|inc)$ + # https://github.com/rstcheck/rstcheck/issues/199 + # https://github.com/rstcheck/rstcheck/issues/200 + exclude: ^doc/(help/faq|install/manual_install|install/mne_c|install/advanced|install/updating|_includes/channel_interpolation|_includes/inverse|_includes/ssp)\.rst$ diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 396cfe956b2..1b8ddc505a4 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -108,7 +108,7 @@ stages: - bash: | set -e python -m pip install --progress-bar off --upgrade pip setuptools wheel - python -m pip install --progress-bar off 
"mne-qt-browser[opengl] @ git+https://github.com/mne-tools/mne-qt-browser.git@main" pyvista scikit-learn pytest-error-for-skips python-picard "PyQt6!=6.5.1" qtpy + python -m pip install --progress-bar off "mne-qt-browser[opengl] @ git+https://github.com/mne-tools/mne-qt-browser.git@main" pyvista scikit-learn pytest-error-for-skips python-picard "PyQt6!=6.5.1" qtpy nibabel python -m pip uninstall -yq mne python -m pip install --progress-bar off --upgrade -e .[test] displayName: 'Install dependencies with pip' diff --git a/doc/_includes/dig_formats.rst b/doc/_includes/dig_formats.rst index c2d3fde4c27..5928b081aea 100644 --- a/doc/_includes/dig_formats.rst +++ b/doc/_includes/dig_formats.rst @@ -1,4 +1,5 @@ :orphan: + .. _dig-formats: Supported formats for digitized 3D locations diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index a0a7056a607..ffafe84c5ed 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -38,13 +38,19 @@ Enhancements - Add support for writing forward solutions to HDF5 and convenience function :meth:`mne.Forward.save` (:gh:`12036` by `Eric Larson`_) - Refactored internals of :func:`mne.read_annotations` (:gh:`11964` by `Paul Roujansky`_) - Add support for drawing MEG sensors in :ref:`mne coreg` (:gh:`12098` by `Eric Larson`_) +- Add ``check_version=True`` to :ref:`mne sys_info` to check for a new release on GitHub (:gh:`12146` by `Eric Larson`_) +- Bad channels are now colored gray in addition to being dashed when spatial colors are used in :func:`mne.viz.plot_evoked` and related functions (:gh:`12142` by `Eric Larson`_) - By default MNE-Python creates matplotlib figures with ``layout='constrained'`` rather than the default ``layout='tight'`` (:gh:`12050`, :gh:`12103` by `Mathieu Scheltienne`_ and `Eric Larson`_) - Enhance :func:`~mne.viz.plot_evoked_field` with a GUI that has controls for time, colormap, and contour lines (:gh:`11942` by `Marijn van Vliet`_) - Add :class:`mne.viz.ui_events.UIEvent` linking for interactive 
colorbars, allowing users to link figures and change the colormap and limits interactively. This supports :func:`~mne.viz.plot_evoked_topomap`, :func:`~mne.viz.plot_ica_components`, :func:`~mne.viz.plot_tfr_topomap`, :func:`~mne.viz.plot_projs_topomap`, :meth:`~mne.Evoked.plot_image`, and :meth:`~mne.Epochs.plot_image` (:gh:`12057` by `Santeri Ruuskanen`_) - Add example KIT phantom dataset in :func:`mne.datasets.phantom_kit.data_path` and :ref:`tut-phantom-kit` (:gh:`12105` by `Judy D Zhu`_ and `Eric Larson`_) +- :func:`~mne.epochs.make_metadata` now accepts ``tmin=None`` and ``tmax=None``, which will bound the time window used for metadata generation by event names (instead of a fixed time). That way, you can now for example generate metadata spanning from one cue or fixation cross to the next, even if trial durations vary throughout the recording (:gh:`12118` by `Richard Höchenberger`_) +- Add support for passing multiple labels to :func:`mne.minimum_norm.source_induced_power` (:gh:`12026` by `Erica Peterson`_, `Eric Larson`_, and `Daniel McCloy`_ ) +- Added documentation to :meth:`mne.io.Raw.set_montage` and :func:`mne.add_reference_channels` to specify that montages should be set after adding reference channels (:gh:`12160` by `Jacob Woessner`_) Bugs ~~~~ +- Fix bug where :func:`mne.io.read_raw_gdf` would fail due to improper usage of ``np.clip`` (:gh:`12168` by :newcontrib:`Rasmus Aagaard`) - Fix bugs with :func:`mne.preprocessing.realign_raw` where the start of ``other`` was incorrectly cropped; and onsets and durations in ``other.annotations`` were left unsynced with the resampled data (:gh:`11950` by :newcontrib:`Qian Chu`) - Fix bug where ``encoding`` argument was ignored when reading annotations from an EDF file (:gh:`11958` by :newcontrib:`Andrew Gilbert`) - Mark tests ``test_adjacency_matches_ft`` and ``test_fetch_uncompressed_file`` as network tests (:gh:`12041` by :newcontrib:`Maksym Balatsko`) @@ -60,10 +66,13 @@ Bugs - Fix bug with 
:meth:`mne.viz.Brain.get_view` where calling :meth:`~mne.viz.Brain.show_view` with returned parameters would change the view (:gh:`12000` by `Eric Larson`_) - Fix bug with :meth:`mne.viz.Brain.show_view` where ``distance=None`` would change the view distance (:gh:`12000` by `Eric Larson`_) - Fix bug with :meth:`~mne.viz.Brain.add_annotation` when reading an annotation from a file with both hemispheres shown (:gh:`11946` by `Marijn van Vliet`_) +- Fix bug with reported component number and errant reporting of PCA explained variance as ICA explained variance in :meth:`mne.Report.add_ica` (:gh:`12155`, :gh:`12167` by `Eric Larson`_ and `Richard Höchenberger`_) - Fix bug with axis clip box boundaries in :func:`mne.viz.plot_evoked_topo` and related functions (:gh:`11999` by `Eric Larson`_) - Fix bug with ``subject_info`` when loading data from and exporting to EDF file (:gh:`11952` by `Paul Roujansky`_) +- Fix bug where :class:`mne.Info` HTML representations listed all channel counts instead of good channel counts under the heading "Good channels" (:gh:`12145` by `Eric Larson`_) - Fix rendering glitches when plotting Neuromag/TRIUX sensors in :func:`mne.viz.plot_alignment` and related functions (:gh:`12098` by `Eric Larson`_) - Fix bug with delayed checking of :class:`info["bads"] ` (:gh:`12038` by `Eric Larson`_) +- Fix bug with :ref:`mne coreg` where points inside the head surface were not shown (:gh:`12147`, :gh:`12164` by `Eric Larson`_) - Fix bug with :func:`mne.viz.plot_alignment` where ``sensor_colors`` were not handled properly on a per-channel-type basis (:gh:`12067` by `Eric Larson`_) - Fix handling of channel information in annotations when loading data from and exporting to EDF file (:gh:`11960` :gh:`12017` :gh:`12044` by `Paul Roujansky`_) - Add missing ``overwrite`` and ``verbose`` parameters to :meth:`Transform.save() ` (:gh:`12004` by `Marijn van Vliet`_) @@ -72,8 +81,11 @@ Bugs - Fix :func:`~mne.viz.plot_volume_source_estimates` with 
:class:`~mne.VolSourceEstimate` which include a list of vertices (:gh:`12025` by `Mathieu Scheltienne`_) - Add support for non-ASCII characters in Annotations, Evoked comments, etc when saving to FIFF format (:gh:`12080` by `Daniel McCloy`_) - Correctly handle passing ``"eyegaze"`` or ``"pupil"`` to :meth:`mne.io.Raw.pick` (:gh:`12019` by `Scott Huberty`_) +- Fix bug with :func:`mne.time_frequency.Spectrum.plot` and related functions where bad channels were not marked (:gh:`12142` by `Eric Larson`_) - Fix bug with :func:`~mne.viz.plot_raw` where changing ``MNE_BROWSER_BACKEND`` via :func:`~mne.set_config` would have no effect within a Python session (:gh:`12078` by `Santeri Ruuskanen`_) - Improve handling of ``method`` argument in the channel interpolation function to support :class:`str` and raise helpful error messages (:gh:`12113` by `Mathieu Scheltienne`_) +- Fix combination of ``DIN`` event channels into a single synthetic trigger channel ``STI 014`` by the MFF reader of :func:`mne.io.read_raw_egi` (:gh:`12122` by `Mathieu Scheltienne`_) +- Fix bug with :func:`mne.io.read_raw_eeglab` and :func:`mne.read_epochs_eeglab` where automatic fiducial detection would fail for certain files (:gh:`12165` by `Clemens Brunner`_) API changes ~~~~~~~~~~~ @@ -81,3 +93,4 @@ API changes - :func:`mne.io.kit.read_mrk` reading pickled files is deprecated using something like ``np.savetxt(fid, pts, delimiter="\t", newline="\n")`` to save your points instead (:gh:`11937` by `Eric Larson`_) - Replace legacy ``inst.pick_channels`` and ``inst.pick_types`` with ``inst.pick`` (where ``inst`` is an instance of :class:`~mne.io.Raw`, :class:`~mne.Epochs`, or :class:`~mne.Evoked`) wherever possible (:gh:`11907` by `Clemens Brunner`_) - The ``reset_camera`` parameter has been removed in favor of ``distance="auto"`` in :func:`mne.viz.set_3d_view`, :meth:`mne.viz.Brain.show_view`, and related functions (:gh:`12000` by `Eric Larson`_) +- Several unused parameters from 
:func:`mne.gui.coregistration` are now deprecated: tabbed, split, scrollable, head_inside, guess_mri_subject, scale, and ``advanced_rendering``. All arguments are also now keyword-only. (:gh:`12147` by `Eric Larson`_) diff --git a/doc/changes/names.inc b/doc/changes/names.inc index 722f0c2dc0d..925e38bfd22 100644 --- a/doc/changes/names.inc +++ b/doc/changes/names.inc @@ -184,7 +184,7 @@ .. _George O'Neill: https://georgeoneill.github.io -.. _Gonzalo Reina: https://greina.me/ +.. _Gonzalo Reina: https://github.com/Gon-reina .. _Guillaume Dumas: https://mila.quebec/en/person/guillaume-dumas @@ -442,6 +442,8 @@ .. _ramonapariciog: https://github.com/ramonapariciog +.. _Rasmus Aagaard: https://github.com/rasgaard + .. _Rasmus Zetter: https://people.aalto.fi/rasmus.zetter .. _Reza Nasri: https://github.com/rznas diff --git a/doc/changes/v0.17.rst b/doc/changes/v0.17.rst index 82db1975a9f..40896b6f383 100644 --- a/doc/changes/v0.17.rst +++ b/doc/changes/v0.17.rst @@ -201,7 +201,7 @@ Bug - Fix processing of data with bad segments and acquisition skips with new ``skip_by_annotation`` parameter in :func:`mne.preprocessing.maxwell_filter` by `Eric Larson`_ -- Fix symlinking to use relative paths in ``mne flash_bem` and ``mne watershed_bem`` by `Eric Larson`_ +- Fix symlinking to use relative paths in ``mne flash_bem`` and ``mne watershed_bem`` by `Eric Larson`_ - Fix error in mne coreg when saving with scaled MRI if fiducials haven't been saved by `Ezequiel Mikulan`_ diff --git a/doc/changes/v0.18.rst b/doc/changes/v0.18.rst index e525e8849a9..4e73e42239b 100644 --- a/doc/changes/v0.18.rst +++ b/doc/changes/v0.18.rst @@ -8,7 +8,7 @@ Changelog - Add ``event_id='auto'`` in :func:`mne.events_from_annotations` to accommodate Brainvision markers by `Jona Sassenhagen`_, `Joan Massich`_ and `Eric Larson`_ -- Add example on how to simulate raw data using subject anatomy, by `Ivana Kojcic`_,`Eric Larson`_,`Samuel Deslauriers-Gauthier`_ and`Kostiantyn Maksymenko`_ +- Add example on 
how to simulate raw data using subject anatomy, by `Ivana Kojcic`_, `Eric Larson`_, `Samuel Deslauriers-Gauthier`_ and `Kostiantyn Maksymenko`_ - :func:`mne.beamformer.apply_lcmv_cov` returns static source power after supplying a data covariance matrix to the beamformer filter by `Britta Westner`_ and `Marijn van Vliet`_ @@ -159,7 +159,7 @@ Bug - Fix 32bits annotations in :func:`mne.io.read_raw_cnt` by `Joan Massich`_ -- Fix :func:`mne.events_from_annotations` to ignore ``'BAD_'` and ``'EDGE_'`` annotations by default using a new default ``regexp`` by `Eric Larson`_ +- Fix :func:`mne.events_from_annotations` to ignore ``'BAD_'`` and ``'EDGE_'`` annotations by default using a new default ``regexp`` by `Eric Larson`_ - Fix bug in ``mne.preprocessing.mark_flat`` where ``raw.first_samp`` was not taken into account by `kalenkovich`_ diff --git a/doc/changes/v1.0.rst b/doc/changes/v1.0.rst index 0c053e134a5..dd5e7b501ed 100644 --- a/doc/changes/v1.0.rst +++ b/doc/changes/v1.0.rst @@ -132,7 +132,7 @@ Bugs - Fix use of arguments in :func:`numpy.loadtxt` (:gh:`10189` by :newcontrib:`Federico Zamberlan`) -- Fix documentation of options in :func:`mne.stc_near_sensors` (:gh:`` by :newcontrib:`Nikolai Chapochnikov`) +- Fix documentation of options in :func:`mne.stc_near_sensors` (:gh:`10007` by :newcontrib:`Nikolai Chapochnikov`) - :func:`mne.time_frequency.tfr_array_multitaper` now returns results per taper when ``output='complex'`` (:gh:`10281` by `Mikołaj Magnuski`_) diff --git a/doc/changes/v1.1.rst b/doc/changes/v1.1.rst index 50ebc8111e8..de0f597c0ee 100644 --- a/doc/changes/v1.1.rst +++ b/doc/changes/v1.1.rst @@ -131,7 +131,7 @@ Bugs - Fix bug in :func:`mne.io.read_raw_ctf` where invalid measurement dates were not handled properly (:gh:`10957` by `Jean-Remi King`_ and `Eric Larson`_) -- Rendering issues with recent MESA releases can be avoided by setting the new environment variable``MNE_3D_OPTION_MULTI_SAMPLES=1`` or using :func:`mne.viz.set_3d_options` (:gh:`10513` by 
`Eric Larson`_) +- Rendering issues with recent MESA releases can be avoided by setting the new environment variable ``MNE_3D_OPTION_MULTI_SAMPLES=1`` or using :func:`mne.viz.set_3d_options` (:gh:`10513` by `Eric Larson`_) - Fix behavior for the ``pyvista`` 3D renderer's ``quiver3D`` function so that default arguments plot a glyph in ``arrow`` mode (:gh:`10493` by `Alex Rockhill`_) diff --git a/doc/changes/v1.4.rst b/doc/changes/v1.4.rst index 735a7b6af18..2fa9ec2a0d1 100644 --- a/doc/changes/v1.4.rst +++ b/doc/changes/v1.4.rst @@ -8,7 +8,7 @@ Enhancements - Add functionality for reading CNT spans/annotations marked bad to :func:`mne.io.read_raw_cnt` (:gh:`11631` by :newcontrib:`Jacob Woessner`) - Add ``:unit:`` Sphinx directive to enable use of uniform non-breaking spaces throughout the documentation (:gh:`11469` by :newcontrib:`Sawradip Saha`) - Adjusted the algorithm used in :class:`mne.decoding.SSD` to support non-full rank data (:gh:`11458` by :newcontrib:`Thomas Binns`) -- Changed suggested type for ``ch_groups``` in `mne.viz.plot_sensors` from array to list of list(s) (arrays are still supported). (:gh:`11465` by `Hyonyoung Shin`_) +- Changed suggested type for ``ch_groups`` in `mne.viz.plot_sensors` from array to list of list(s) (arrays are still supported). 
(:gh:`11465` by `Hyonyoung Shin`_) - Add support for UCL/FIL OPM data using :func:`mne.io.read_raw_fil` (:gh:`11366` by :newcontrib:`George O'Neill` and `Robert Seymour`_) - Add harmonic field correction (HFC) for OPM sensors in :func:`mne.preprocessing.compute_proj_hfc` (:gh:`11536` by :newcontrib:`George O'Neill` and `Eric Larson`_) - Forward argument ``axes`` from `mne.viz.plot_sensors` to `mne.channels.DigMontage.plot` (:gh:`11470` by :newcontrib:`Jan Ebert` and `Mathieu Scheltienne`_) diff --git a/doc/changes/v1.5.rst b/doc/changes/v1.5.rst index a272e6c6fdc..c607aefe26d 100644 --- a/doc/changes/v1.5.rst +++ b/doc/changes/v1.5.rst @@ -76,7 +76,7 @@ Bugs API changes ~~~~~~~~~~~ - The ``baseline`` argument can now be array-like (e.g. ``list``, ``tuple``, ``np.ndarray``, ...) instead of only a ``tuple`` (:gh:`11713` by `Clemens Brunner`_) -- The ``events`` and ``event_id`` parameters of `:meth:`Epochs.plot() ` now accept boolean values; see docstring for details (:gh:`11445` by `Daniel McCloy`_ and `Clemens Brunner`_) +- The ``events`` and ``event_id`` parameters of :meth:`Epochs.plot() ` now accept boolean values; see docstring for details (:gh:`11445` by `Daniel McCloy`_ and `Clemens Brunner`_) - Deprecated ``gap_description`` keyword argument of :func:`mne.io.read_raw_eyelink`, which will be removed in mne version 1.6, in favor of using :meth:`mne.Annotations.rename` (:gh:`11719` by `Scott Huberty`_) Authors diff --git a/doc/conf.py b/doc/conf.py index f9128227cd6..df835e4a088 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -23,6 +23,7 @@ from numpydoc import docscrape import mne +import mne.html_templates._templates from mne.tests.test_docstring_parameters import error_ignores from mne.utils import ( linkcode_resolve, # noqa, analysis:ignore @@ -41,6 +42,7 @@ # https://numba.readthedocs.io/en/latest/reference/deprecation.html#deprecation-of-old-style-numba-captured-errors # noqa: E501 os.environ["NUMBA_CAPTURED_ERRORS"] = "new_style" sphinx_logger = 
sphinx.util.logging.getLogger("mne") +mne.html_templates._templates._COLLAPSED = True # collapse info _repr_html_ # -- Path setup -------------------------------------------------------------- @@ -720,11 +722,17 @@ def append_attr_meth_examples(app, what, name, obj, options, lines): "https://doi.org/10.1088/", # www.tandfonline.com "https://doi.org/10.3109/", # www.tandfonline.com "https://www.researchgate.net/profile/", + "https://www.intel.com/content/www/us/en/developer/tools/oneapi/onemkl.html", + "https://scholar.google.com/scholar?cites=12188330066413208874&as_ylo=2014", + "https://scholar.google.com/scholar?cites=1521584321377182930&as_ylo=2013", + # 500 server error + "https://openwetware.org/wiki/Beauchamp:FreeSurfer", # 503 Server error "https://hal.archives-ouvertes.fr/hal-01848442", # Read timed out "http://www.cs.ucl.ac.uk/staff/d.barber/brml", "https://www.cea.fr", + "http://www.humanconnectome.org/data", # Max retries exceeded "https://doi.org/10.7488/ds/1556", "https://datashare.is.ed.ac.uk/handle/10283", diff --git a/doc/development/contributing.rst b/doc/development/contributing.rst index d741c540479..2dbf90d306b 100644 --- a/doc/development/contributing.rst +++ b/doc/development/contributing.rst @@ -215,8 +215,6 @@ Once you have git installed and configured, and before creating your local copy of the codebase, go to the `MNE-Python GitHub`_ page and create a `fork`_ into your GitHub user account. -.. image:: https://docs.github.com/assets/cb-28613/images/help/repository/fork_button.png - This will create a copy of the MNE-Python codebase inside your GitHub user account (this is called "your fork"). Changes you make to MNE-Python will eventually get "pushed" to your fork, and will be incorporated into the @@ -1113,3 +1111,5 @@ it can serve as a useful example of what to expect from the PR review process. .. _optipng: http://optipng.sourceforge.net/ .. 
_optipng for Windows: http://prdownloads.sourceforge.net/optipng/optipng-0.7.7-win32.zip?download + +.. include:: ../links.inc diff --git a/doc/development/governance.rst b/doc/development/governance.rst index f7d81ee8a85..f5b70e39485 100644 --- a/doc/development/governance.rst +++ b/doc/development/governance.rst @@ -69,7 +69,7 @@ BDFL ---- The Project will have a BDFL (Benevolent Dictator for Life), who is currently -Alexandre Gramfort. As Dictator, the BDFL has the authority to make all final +Daniel McCloy. As Dictator, the BDFL has the authority to make all final decisions for The Project. As Benevolent, the BDFL, in practice, chooses to defer that authority to the consensus of the community discussion channels and the Steering Council (see below). It is expected, and in the past has been the diff --git a/doc/documentation/design_philosophy.rst b/doc/documentation/design_philosophy.rst index af43f630aa1..5bdec09b4fa 100644 --- a/doc/documentation/design_philosophy.rst +++ b/doc/documentation/design_philosophy.rst @@ -94,3 +94,5 @@ of data. .. LINKS .. _`method chaining`: https://en.wikipedia.org/wiki/Method_chaining + +.. include:: ../links.inc diff --git a/doc/documentation/index.rst b/doc/documentation/index.rst index 6830edff012..764fcd08188 100644 --- a/doc/documentation/index.rst +++ b/doc/documentation/index.rst @@ -59,3 +59,5 @@ Documentation for the related C and MATLAB tools are available here: cookbook cite cited + +.. include:: ../links.inc diff --git a/doc/help/faq.rst b/doc/help/faq.rst index ea280290c14..14d85f4e038 100644 --- a/doc/help/faq.rst +++ b/doc/help/faq.rst @@ -275,6 +275,7 @@ magnitude. Forward and Inverse Solution ============================ +.. _faq_how_should_i_regularize: How should I regularize the covariance matrix? 
---------------------------------------------- diff --git a/doc/install/mne_tools_suite.rst b/doc/install/mne_tools_suite.rst index 579e3c77c08..03b65671826 100644 --- a/doc/install/mne_tools_suite.rst +++ b/doc/install/mne_tools_suite.rst @@ -112,3 +112,5 @@ Help with installation is available through the `MNE Forum`_. See the .. _invertmeeg: https://github.com/LukeTheHecker/invert .. _MNE-ARI: https://github.com/john-veillette/mne_ari .. _niseq: https://github.com/john-veillette/niseq + +.. include:: ../links.inc diff --git a/doc/overview/people.rst b/doc/overview/people.rst index 14c20724095..3647aae978a 100644 --- a/doc/overview/people.rst +++ b/doc/overview/people.rst @@ -22,8 +22,6 @@ Steering Council * `Daniel McCloy`_ * `Denis Engemann`_ * `Eric Larson`_ -* `Guillaume Favelier`_ -* `Luke Bloy`_ * `Mainak Jas`_ * `Marijn van Vliet`_ * `Mathieu Scheltienne`_ diff --git a/examples/preprocessing/eeg_bridging.py b/examples/preprocessing/eeg_bridging.py index 7eadb7239d2..6d2c1aec165 100644 --- a/examples/preprocessing/eeg_bridging.py +++ b/examples/preprocessing/eeg_bridging.py @@ -10,7 +10,7 @@ electrode connects with the gel conducting signal from another electrode "bridging" the two signals. This is undesirable because the signals from the two (or more) electrodes are not as independent as they would otherwise be; -they are very similar to each other introducting additional +they are very similar to each other introducing additional spatial smearing. An algorithm has been developed to detect electrode bridging :footcite:`TenkeKayser2001`, which has been implemented in EEGLAB :footcite:`DelormeMakeig2004`. 
Unfortunately, there is not a lot to be diff --git a/examples/time_frequency/source_label_time_frequency.py b/examples/time_frequency/source_label_time_frequency.py index f88d1ce2c50..a9c32934e38 100644 --- a/examples/time_frequency/source_label_time_frequency.py +++ b/examples/time_frequency/source_label_time_frequency.py @@ -35,8 +35,8 @@ meg_path = data_path / "MEG" / "sample" raw_fname = meg_path / "sample_audvis_raw.fif" fname_inv = meg_path / "sample_audvis-meg-oct-6-meg-inv.fif" -label_name = "Aud-rh" -fname_label = meg_path / "labels" / f"{label_name}.label" +label_names = ["Aud-lh", "Aud-rh"] +fname_labels = [meg_path / "labels" / f"{ln}.label" for ln in label_names] tmin, tmax, event_id = -0.2, 0.5, 2 @@ -70,7 +70,8 @@ # Compute a source estimate per frequency band including and excluding the # evoked response freqs = np.arange(7, 30, 2) # define frequencies of interest -label = mne.read_label(fname_label) +labels = [mne.read_label(fl) for fl in fname_labels] +label = labels[0] n_cycles = freqs / 3.0 # different number of cycle per frequency # subtract the evoked response in order to exclude evoked activity @@ -122,3 +123,44 @@ ) ax.set(xlabel="Time (s)", ylabel="Frequency (Hz)", title=f"ITC ({title})") fig.colorbar(ax.images[0], ax=axes[ii]) + +# %% + +############################################################################## +# In the example above, we averaged power across vertices after calculating +# power because we provided a single label for power calculation and therefore +# power of all sources within the single label were returned separately. When +# we provide a list of labels, power is averaged across sources within each +# label automatically. With a list of labels, averaging is performed before +# rescaling, so choose a baseline method appropriately. 
+ + +# Get power from multiple labels +multi_label_power = source_induced_power( + epochs, + inverse_operator, + freqs, + labels, + baseline=(-0.1, 0), + baseline_mode="mean", + n_cycles=n_cycles, + n_jobs=None, + return_plv=False, +) + +# visually compare evoked power in left and right auditory regions +fig, axes = plt.subplots(ncols=2, layout="constrained") +for l_idx, l_power in enumerate(multi_label_power): + ax = axes[l_idx] + ax.imshow( + l_power, + extent=[epochs.times[0], epochs.times[-1], freqs[0], freqs[-1]], + aspect="auto", + origin="lower", + vmin=multi_label_power.min(), + vmax=multi_label_power.max(), + cmap="RdBu_r", + ) + title = f"{labels[l_idx].hemi.upper()} Evoked Power" + ax.set(xlabel="Time (s)", ylabel="Frequency (Hz)", title=title) + fig.colorbar(ax.images[0], ax=ax) diff --git a/mne/_fiff/meas_info.py b/mne/_fiff/meas_info.py index 7b74e8c0ead..5d67b77470c 100644 --- a/mne/_fiff/meas_info.py +++ b/mne/_fiff/meas_info.py @@ -9,8 +9,7 @@ import datetime import operator import string -import uuid -from collections import Counter, OrderedDict +from collections import Counter, OrderedDict, defaultdict from collections.abc import Mapping from copy import deepcopy from io import BytesIO @@ -411,6 +410,12 @@ def set_montage( a montage. Other channel types (e.g., MEG channels) should have their positions defined properly using their data reading functions. + .. warning:: + Applying a montage will only set locations of channels that exist + at the time it is applied. 
This means when + :ref:`re-referencing ` + make sure to apply the montage only after calling + :func:`mne.add_reference_channels` """ # How to set up a montage to old named fif file (walk through example) # https://gist.github.com/massich/f6a9f4799f1fbeb8f5e8f8bc7b07d3df @@ -1852,37 +1857,25 @@ def _get_chs_for_repr(self): titles = _handle_default("titles") # good channels - channels = {} - ch_types = [channel_type(self, idx) for idx in range(len(self["chs"]))] - ch_counts = Counter(ch_types) - for ch_type, count in ch_counts.items(): - if ch_type == "meg": - channels["mag"] = len(pick_types(self, meg="mag")) - channels["grad"] = len(pick_types(self, meg="grad")) - elif ch_type == "eog": - pick_eog = pick_types(self, eog=True) - eog = ", ".join(np.array(self["ch_names"])[pick_eog]) - elif ch_type == "ecg": - pick_ecg = pick_types(self, ecg=True) - ecg = ", ".join(np.array(self["ch_names"])[pick_ecg]) - channels[ch_type] = count - + good_names = defaultdict(lambda: list()) + for ci, ch_name in enumerate(self["ch_names"]): + if ch_name in self["bads"]: + continue + ch_type = channel_type(self, ci) + good_names[ch_type].append(ch_name) good_channels = ", ".join( - [f"{v} {titles.get(k, k.upper())}" for k, v in channels.items()] + [f"{len(v)} {titles.get(k, k.upper())}" for k, v in good_names.items()] ) - - if "ecg" not in channels.keys(): - ecg = "Not available" - if "eog" not in channels.keys(): - eog = "Not available" + for key in ("ecg", "eog"): # ensure these are present + if key not in good_names: + good_names[key] = list() + for key, val in good_names.items(): + good_names[key] = ", ".join(val) or "Not available" # bad channels - if len(self["bads"]) > 0: - bad_channels = ", ".join(self["bads"]) - else: - bad_channels = "None" + bad_channels = ", ".join(self["bads"]) or "None" - return good_channels, bad_channels, ecg, eog + return good_channels, bad_channels, good_names["ecg"], good_names["eog"] @repr_html def _repr_html_(self, caption=None, duration=None, 
filenames=None): @@ -1918,10 +1911,8 @@ def _repr_html_(self, caption=None, duration=None, filenames=None): info_template = _get_html_template("repr", "info.html.jinja") sections = ("General", "Channels", "Data") - section_ids = [f"section_{str(uuid.uuid4())}" for _ in sections] return html + info_template.render( sections=sections, - section_ids=section_ids, caption=caption, meas_date=meas_date, projs=projs, diff --git a/mne/_fiff/reference.py b/mne/_fiff/reference.py index 01d6e6e1230..0062bc4f40f 100644 --- a/mne/_fiff/reference.py +++ b/mne/_fiff/reference.py @@ -175,6 +175,14 @@ def add_reference_channels(inst, ref_channels, copy=True): ------- inst : instance of Raw | Epochs | Evoked Data with added EEG reference channels. + + Notes + ----- + .. warning:: + When :ref:`re-referencing `, + make sure to apply the montage using :meth:`mne.io.Raw.set_montage` + only after calling this function. Applying a montage will only set + locations of channels that exist at the time it is applied. """ from ..epochs import BaseEpochs from ..evoked import Evoked @@ -239,7 +247,9 @@ def add_reference_channels(inst, ref_channels, copy=True): ref_dig_array = np.full(12, np.nan) logger.info( "Location for this channel is unknown; consider calling " - "set_montage() again if needed." + "set_montage() after adding new reference channels if needed. " + "Applying a montage will only set locations of channels that " + "exist at the time it is applied." 
) for ch in ref_channels: diff --git a/mne/_fiff/tests/test_meas_info.py b/mne/_fiff/tests/test_meas_info.py index 5ecb9e62775..bcb8b96f8c8 100644 --- a/mne/_fiff/tests/test_meas_info.py +++ b/mne/_fiff/tests/test_meas_info.py @@ -899,11 +899,11 @@ def test_repr_html(): assert "EEG 053" in info._repr_html_() html = info._repr_html_() - for ch in [ - "204 Gradiometers", + for ch in [ # good channel counts + "203 Gradiometers", "102 Magnetometers", "9 Stimulus", - "60 EEG", + "59 EEG", "1 EOG", ]: assert ch in html diff --git a/mne/commands/mne_coreg.py b/mne/commands/mne_coreg.py index add4a65845b..ed440e987e3 100644 --- a/mne/commands/mne_coreg.py +++ b/mne/commands/mne_coreg.py @@ -44,7 +44,7 @@ def run(): "--tabbed", dest="tabbed", action="store_true", - default=False, + default=None, help="Option for small screens: Combine " "the data source panel and the coregistration panel " "into a single panel with tabs.", @@ -103,6 +103,7 @@ def run(): "--simple-rendering", action="store_false", dest="advanced_rendering", + default=None, help="Use simplified OpenGL rendering", ) _add_verbose_flag(parser) @@ -131,7 +132,7 @@ def run(): faulthandler.enable() mne.gui.coregistration( - options.tabbed, + tabbed=options.tabbed, inst=options.inst, subject=options.subject, subjects_dir=subjects_dir, @@ -139,7 +140,7 @@ def run(): head_opacity=options.head_opacity, head_high_res=head_high_res, trans=trans, - scrollable=True, + scrollable=None, interaction=options.interaction, scale=options.scale, advanced_rendering=options.advanced_rendering, diff --git a/mne/commands/mne_sys_info.py b/mne/commands/mne_sys_info.py index 62bbf9afbfe..ae355c2cef5 100644 --- a/mne/commands/mne_sys_info.py +++ b/mne/commands/mne_sys_info.py @@ -41,6 +41,13 @@ def run(): action="store_false", default=True, ) + parser.add_option( + "--no-check-version", + dest="check_version", + help="Disable MNE-Python remote version checking.", + action="store_false", + default=True, + ) options, args = 
parser.parse_args() dependencies = "developer" if options.developer else "user" if len(args) != 0: @@ -51,6 +58,7 @@ def run(): show_paths=options.show_paths, dependencies=dependencies, unicode=options.unicode, + check_version=options.check_version, ) diff --git a/mne/commands/tests/test_commands.py b/mne/commands/tests/test_commands.py index fecd0235a87..b2a93abfa96 100644 --- a/mne/commands/tests/test_commands.py +++ b/mne/commands/tests/test_commands.py @@ -319,6 +319,7 @@ def test_watershed_bem(tmp_path): @testing.requires_testing_data def test_flash_bem(tmp_path): """Test mne flash_bem.""" + pytest.importorskip("nibabel") check_usage(mne_flash_bem, force_help=True) # Copy necessary files to tempdir tempdir = Path(str(tmp_path)) @@ -538,7 +539,7 @@ def test_sys_info(): with ArgvSetter((raw_fname,)): with pytest.raises(SystemExit, match="1"): mne_sys_info.run() - with ArgvSetter() as out: + with ArgvSetter(("--no-check-version",)) as out: mne_sys_info.run() assert "numpy" in out.stdout.getvalue() diff --git a/mne/conftest.py b/mne/conftest.py index e4174e7deb9..33396621890 100644 --- a/mne/conftest.py +++ b/mne/conftest.py @@ -92,8 +92,6 @@ def pytest_configure(config): # Fixtures for fixture in ( "matplotlib_config", - "close_all", - "check_verbose", "qt_config", "protect_config", ): @@ -176,6 +174,7 @@ def pytest_configure(config): ignore:numpy\.core\._multiarray_umath.*:DeprecationWarning ignore:numpy\.core\.numeric is deprecated.*:DeprecationWarning ignore:numpy\.core\.multiarray is deprecated.*:DeprecationWarning + ignore:The numpy\.fft\.helper has been made private.*:DeprecationWarning # TODO: Should actually fix these two ignore:scipy.signal.morlet2 is deprecated in SciPy.*:DeprecationWarning ignore:The `needs_threshold` and `needs_proba`.*:FutureWarning @@ -267,10 +266,7 @@ def matplotlib_config(): # functionality) plt.ioff() plt.rcParams["figure.dpi"] = 100 - try: - plt.rcParams["figure.raise_window"] = False - except KeyError: # MPL < 3.3 - pass + 
plt.rcParams["figure.raise_window"] = False # Make sure that we always reraise exceptions in handlers orig = cbook.CallbackRegistry diff --git a/mne/cov.py b/mne/cov.py index db4b2126a3d..376cd6a8a59 100644 --- a/mne/cov.py +++ b/mne/cov.py @@ -1895,9 +1895,8 @@ def regularize( .. note:: This function is kept for reasons of backward-compatibility. Please consider explicitly using the ``method`` parameter in :func:`mne.compute_covariance` to directly combine estimation - with regularization in a data-driven fashion. See the `faq - `_ - for more information. + with regularization in a data-driven fashion. See the + :ref:`FAQ ` for more information. Parameters ---------- diff --git a/mne/decoding/tests/test_search_light.py b/mne/decoding/tests/test_search_light.py index 00b4f98f997..3d5009763eb 100644 --- a/mne/decoding/tests/test_search_light.py +++ b/mne/decoding/tests/test_search_light.py @@ -2,6 +2,7 @@ # # License: BSD-3-Clause +import platform from inspect import signature import numpy as np @@ -10,7 +11,7 @@ from mne.decoding.search_light import GeneralizingEstimator, SlidingEstimator from mne.decoding.transformer import Vectorizer -from mne.utils import _record_warnings, use_log_level +from mne.utils import _record_warnings, check_version, use_log_level pytest.importorskip("sklearn") @@ -29,6 +30,9 @@ def make_data(): def test_search_light(): """Test SlidingEstimator.""" + # https://github.com/scikit-learn/scikit-learn/issues/27711 + if platform.system() == "Windows" and check_version("numpy", "2.0.0.dev0"): + pytest.skip("sklearn int_t / long long mismatch") from sklearn.linear_model import LogisticRegression, Ridge from sklearn.metrics import make_scorer, roc_auc_score from sklearn.pipeline import make_pipeline diff --git a/mne/epochs.py b/mne/epochs.py index 5af8f382c88..510161f99bc 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -2664,17 +2664,18 @@ def make_metadata( keep_first=None, keep_last=None, ): - """Generate metadata from events for use with 
`mne.Epochs`. + """Automatically generate metadata for use with `mne.Epochs` from events. This function mimics the epoching process (it constructs time windows around time-locked "events of interest") and collates information about any other events that occurred within those time windows. The information - is returned as a :class:`pandas.DataFrame` suitable for use as + is returned as a :class:`pandas.DataFrame`, suitable for use as `~mne.Epochs` metadata: one row per time-locked event, and columns - indicating presence/absence and latency of each ancillary event type. + indicating presence or absence and latency of each ancillary event type. The function will also return a new ``events`` array and ``event_id`` - dictionary that correspond to the generated metadata. + dictionary that correspond to the generated metadata, which together can then be + readily fed into `~mne.Epochs`. Parameters ---------- @@ -2687,9 +2688,9 @@ def make_metadata( A mapping from event names (keys) to event IDs (values). The event names will be incorporated as columns of the returned metadata :class:`~pandas.DataFrame`. - tmin, tmax : float - Start and end of the time interval for metadata generation in seconds, - relative to the time-locked event of the respective time window. + tmin, tmax : float | None + Start and end of the time interval for metadata generation in seconds, relative + to the time-locked event of the respective time window (the "row events"). .. note:: If you are planning to attach the generated metadata to @@ -2697,15 +2698,27 @@ def make_metadata( your epochs time interval, pass the same ``tmin`` and ``tmax`` values here as you use for your epochs. + If ``None``, the time window used for metadata generation is bounded by the + ``row_events``. This is can be particularly practical if trial duration varies + greatly, but each trial starts with a known event (e.g., a visual cue or + fixation). + + .. 
note:: + If ``tmin=None``, the first time window for metadata generation starts with + the first row event. If ``tmax=None``, the last time window for metadata + generation ends with the last event in ``events``. + + .. versionchanged:: 1.6.0 + Added support for ``None``. sfreq : float The sampling frequency of the data from which the events array was extracted. row_events : list of str | str | None - Event types around which to create the time windows / for which to - create **rows** in the returned metadata :class:`pandas.DataFrame`. If - provided, the string(s) must be keys of ``event_id``. If ``None`` - (default), rows are created for **all** event types present in - ``event_id``. + Event types around which to create the time windows. For each of these + time-locked events, we will create a **row** in the returned metadata + :class:`pandas.DataFrame`. If provided, the string(s) must be keys of + ``event_id``. If ``None`` (default), rows are created for **all** event types + present in ``event_id``. keep_first : str | list of str | None Specify subsets of :term:`hierarchical event descriptors` (HEDs, inspired by :footcite:`BigdelyShamloEtAl2013`) matching events of which @@ -2780,8 +2793,10 @@ def make_metadata( The time window used for metadata generation need not correspond to the time window used to create the `~mne.Epochs`, to which the metadata will be attached; it may well be much shorter or longer, or not overlap at all, - if desired. The can be useful, for example, to include events that occurred - before or after an epoch, e.g. during the inter-trial interval. + if desired. This can be useful, for example, to include events that + occurred before or after an epoch, e.g. during the inter-trial interval. + If either ``tmin``, ``tmax``, or both are ``None``, the time window will + typically vary, too. .. 
versionadded:: 0.23 @@ -2791,7 +2806,11 @@ def make_metadata( """ pd = _check_pandas_installed() + _validate_type(events, types=("array-like",), item_name="events") _validate_type(event_id, types=(dict,), item_name="event_id") + _validate_type(sfreq, types=("numeric",), item_name="sfreq") + _validate_type(tmin, types=("numeric", None), item_name="tmin") + _validate_type(tmax, types=("numeric", None), item_name="tmax") _validate_type(row_events, types=(None, str, list, tuple), item_name="row_events") _validate_type(keep_first, types=(None, str, list, tuple), item_name="keep_first") _validate_type(keep_last, types=(None, str, list, tuple), item_name="keep_last") @@ -2840,8 +2859,8 @@ def _ensure_list(x): # First and last sample of each epoch, relative to the time-locked event # This follows the approach taken in mne.Epochs - start_sample = int(round(tmin * sfreq)) - stop_sample = int(round(tmax * sfreq)) + 1 + start_sample = None if tmin is None else int(round(tmin * sfreq)) + stop_sample = None if tmax is None else int(round(tmax * sfreq)) + 1 # Make indexing easier # We create the DataFrame before subsetting the events so we end up with @@ -2887,16 +2906,49 @@ def _ensure_list(x): start_idx = stop_idx metadata.iloc[:, start_idx:] = None - # We're all set, let's iterate over all eventns and fill in in the + # We're all set, let's iterate over all events and fill in in the # respective cells in the metadata. We will subset this to include only # `row_events` later for row_event in events_df.itertuples(name="RowEvent"): row_idx = row_event.Index metadata.loc[row_idx, "event_name"] = id_to_name_map[row_event.id] - # Determine which events fall into the current epoch - window_start_sample = row_event.sample + start_sample - window_stop_sample = row_event.sample + stop_sample + # Determine which events fall into the current time window + if start_sample is None: + # Lower bound is the current event. 
+ window_start_sample = row_event.sample + else: + # Lower bound is determined by tmin. + window_start_sample = row_event.sample + start_sample + + if stop_sample is None: + # Upper bound: next event of the same type, or the last event (of + # any type) if no later event of the same type can be found. + next_events = events_df.loc[ + (events_df["sample"] > row_event.sample), + :, + ] + if next_events.size == 0: + # We've reached the last event in the recording. + window_stop_sample = row_event.sample + elif next_events.loc[next_events["id"] == row_event.id, :].size > 0: + # There's still an event of the same type appearing after the + # current event. Stop one sample short, we don't want to include that + # last event here, but in the next iteration. + window_stop_sample = ( + next_events.loc[next_events["id"] == row_event.id, :].iloc[0][ + "sample" + ] + - 1 + ) + else: + # There are still events after the current one, but not of the + # same type. + window_stop_sample = next_events.iloc[-1]["sample"] + else: + # Upper bound is determined by tmax. 
+ window_stop_sample = row_event.sample + stop_sample + events_in_window = events_df.loc[ (events_df["sample"] >= window_start_sample) & (events_df["sample"] <= window_stop_sample), @@ -4062,12 +4114,13 @@ def _concatenate_epochs( event_id = deepcopy(out.event_id) selection = out.selection # offset is the last epoch + tmax + 10 second - shift = int((10 + tmax) * out.info["sfreq"]) + shift = np.int64((10 + tmax) * out.info["sfreq"]) # Allow reading empty epochs (ToDo: Maybe not anymore in the future) if out._allow_empty: events_offset = 0 else: events_offset = int(np.max(events[0][:, 0])) + shift + events_offset = np.int64(events_offset) events_overflow = False warned = False for ii, epochs in enumerate(epochs_list[1:], 1): diff --git a/mne/gui/_gui.py b/mne/gui/_gui.py index de6e35482b8..82d7591651d 100644 --- a/mne/gui/_gui.py +++ b/mne/gui/_gui.py @@ -7,8 +7,9 @@ @verbose def coregistration( - tabbed=False, - split=True, + *, + tabbed=None, + split=None, width=None, inst=None, subject=None, @@ -18,15 +19,14 @@ def coregistration( head_opacity=None, head_high_res=None, trans=None, - scrollable=True, - *, - orient_to_surface=True, - scale_by_distance=True, - mark_inside=True, + scrollable=None, + orient_to_surface=None, + scale_by_distance=None, + mark_inside=None, interaction=None, scale=None, advanced_rendering=None, - head_inside=True, + head_inside=None, fullscreen=None, show=True, block=False, @@ -143,10 +143,10 @@ def coregistration( .. 
youtube:: ALV5qqMHLlQ """ unsupported_params = { - "tabbed": (tabbed, False), - "split": (split, True), - "scrollable": (scrollable, True), - "head_inside": (head_inside, True), + "tabbed": tabbed, + "split": split, + "scrollable": scrollable, + "head_inside": head_inside, "guess_mri_subject": guess_mri_subject, "scale": scale, "advanced_rendering": advanced_rendering, @@ -158,22 +158,17 @@ def coregistration( to_raise = val is not None if to_raise: warn( - f"The parameter {key} is not supported with" - " the pyvistaqt 3d backend. It will be ignored." + f"The parameter {key} is deprecated and will be removed in 1.7, do " + "not pass a value for it", + FutureWarning, ) + del tabbed, split, scrollable, head_inside, guess_mri_subject, scale + del advanced_rendering config = get_config() - if guess_mri_subject is None: - guess_mri_subject = config.get("MNE_COREG_GUESS_MRI_SUBJECT", "true") == "true" if head_high_res is None: head_high_res = config.get("MNE_COREG_HEAD_HIGH_RES", "true") == "true" - if advanced_rendering is None: - advanced_rendering = ( - config.get("MNE_COREG_ADVANCED_RENDERING", "true") == "true" - ) if head_opacity is None: head_opacity = config.get("MNE_COREG_HEAD_OPACITY", 0.8) - if head_inside is None: - head_inside = config.get("MNE_COREG_HEAD_INSIDE", "true").lower() == "true" if width is None: width = config.get("MNE_COREG_WINDOW_WIDTH", 800) if height is None: @@ -183,23 +178,23 @@ def coregistration( subjects_dir = config["SUBJECTS_DIR"] elif "MNE_COREG_SUBJECTS_DIR" in config: subjects_dir = config["MNE_COREG_SUBJECTS_DIR"] + false_like = ("false", "0") if orient_to_surface is None: - orient_to_surface = config.get("MNE_COREG_ORIENT_TO_SURFACE", "") == "true" + orient_to_surface = config.get("MNE_COREG_ORIENT_TO_SURFACE", "true").lower() + orient_to_surface = orient_to_surface not in false_like if scale_by_distance is None: - scale_by_distance = config.get("MNE_COREG_SCALE_BY_DISTANCE", "") == "true" + scale_by_distance = 
config.get("MNE_COREG_SCALE_BY_DISTANCE", "true").lower() + scale_by_distance = scale_by_distance not in false_like if interaction is None: interaction = config.get("MNE_COREG_INTERACTION", "terrain") if mark_inside is None: - mark_inside = config.get("MNE_COREG_MARK_INSIDE", "") == "true" - if scale is None: - scale = config.get("MNE_COREG_SCENE_SCALE", 0.16) + mark_inside = config.get("MNE_COREG_MARK_INSIDE", "true").lower() + mark_inside = mark_inside not in false_like if fullscreen is None: fullscreen = config.get("MNE_COREG_FULLSCREEN", "") == "true" head_opacity = float(head_opacity) - head_inside = bool(head_inside) width = int(width) height = int(height) - scale = float(scale) from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING from ._coreg import CoregistrationUI diff --git a/mne/html_templates/_templates.py b/mne/html_templates/_templates.py index dff9c6b6c18..2ece5fea66f 100644 --- a/mne/html_templates/_templates.py +++ b/mne/html_templates/_templates.py @@ -1,5 +1,7 @@ import functools +_COLLAPSED = False # will override in doc build + @functools.lru_cache(maxsize=2) def _get_html_templates_env(kind): @@ -19,4 +21,18 @@ def _get_html_templates_env(kind): def _get_html_template(kind, name): - return _get_html_templates_env(kind).get_template(name) + return _RenderWrap( + _get_html_templates_env(kind).get_template(name), + collapsed=_COLLAPSED, + ) + + +class _RenderWrap: + """Class that allows functools.partial-like wrapping of jinja2 Template.render().""" + + def __init__(self, template, **kwargs): + self._template = template + self._kwargs = kwargs + + def render(self, *args, **kwargs): + return self._template.render(*args, **kwargs, **self._kwargs) diff --git a/mne/html_templates/repr/evoked.html.jinja b/mne/html_templates/repr/evoked.html.jinja index cd3c471b3d0..bb9ef0e5f97 100644 --- a/mne/html_templates/repr/evoked.html.jinja +++ b/mne/html_templates/repr/evoked.html.jinja @@ -11,6 +11,7 @@ Timepoints {{ evoked.data.shape[1] }} samples + 
Channels {{ evoked.data.shape[0] }} channels diff --git a/mne/html_templates/repr/info.html.jinja b/mne/html_templates/repr/info.html.jinja index f6d46c49f34..5b787cbfe31 100644 --- a/mne/html_templates/repr/info.html.jinja +++ b/mne/html_templates/repr/info.html.jinja @@ -1,144 +1,101 @@ - - - - - - - - - - - - - {% if meas_date is not none %} - - {% else %} - - {% endif %} - - - - {% if experimenter is not none %} - - {% else %} - - {% endif %} - - - - {% if subject_info is defined and subject_info is not none %} + + {{sections[0]}} +
- -
Measurement date{{ meas_date }}Unknown
Experimenter{{ experimenter }}Unknown
Participant
+ + + {% if meas_date is not none %} + + {% else %} + + {% endif %} + + + + {% if experimenter is not none %} + + {% else %} + + {% endif %} + + + + {% if subject_info is defined and subject_info is not none %} {% if 'his_id' in subject_info.keys() %} {% endif %} - {% else %} - - {% endif %} - - - - - - - {% if dig is not none %} - - {% else %} - - {% endif %} - - - - - - - - - - - - - - - - - - - - - {% if sfreq is not none %} - - - - - {% endif %} - {% if highpass is not none %} - - - - - {% endif %} - {% if lowpass is not none %} - - - - - {% endif %} - {% if projs is not none %} - - - - - {% endif %} - {% if filenames %} - - - - - {% endif %} - {% if duration %} - - - - - {% endif %} -
Measurement date{{ meas_date }}Unknown
Experimenter{{ experimenter }}Unknown
Participant{{ subject_info['his_id'] }}Unknown
- -
Digitized points{{ dig|length }} pointsNot available
Good channels{{ good_channels }}
Bad channels{{ bad_channels }}
EOG channels{{ eog }}
ECG channels{{ ecg }}
- -
Sampling frequency{{ '%0.2f'|format(sfreq) }} Hz
Highpass{{ '%0.2f'|format(highpass) }} Hz
Lowpass{{ '%0.2f'|format(lowpass) }} Hz
Projections{{ projs|join('
') | safe }}
Filenames{{ filenames|join('
') }}
Duration{{ duration }} (HH:MM:SS)
+ {% else %} + Unknown + {% endif %} + + + + + {{sections[1]}} + + + + {% if dig is not none %} + + {% else %} + + {% endif %} + + + + + + + + + + + + + + + + + +
Digitized points{{ dig|length }} pointsNot available
Good channels{{ good_channels }}
Bad channels{{ bad_channels }}
EOG channels{{ eog }}
ECG channels{{ ecg }}
+ + + {{sections[2]}} + + {% if sfreq is not none %} + + + + + {% endif %} + {% if highpass is not none %} + + + + + {% endif %} + {% if lowpass is not none %} + + + + + {% endif %} + {% if projs is not none %} + + + + + {% endif %} + {% if filenames %} + + + + + {% endif %} + {% if duration %} + + + + + {% endif %} +
Sampling frequency{{ '%0.2f'|format(sfreq) }} Hz
Highpass{{ '%0.2f'|format(highpass) }} Hz
Lowpass{{ '%0.2f'|format(lowpass) }} Hz
Projections{{ projs|join('
') | safe }}
Filenames{{ filenames|join('
') }}
Duration{{ duration }} (HH:MM:SS)
+ diff --git a/mne/io/bti/read.py b/mne/io/bti/read.py index 4af53112ae8..d05e2d9d941 100644 --- a/mne/io/bti/read.py +++ b/mne/io/bti/read.py @@ -48,12 +48,12 @@ def read_int8(fid): def read_uint16(fid): """Read unsigned 16bit integer from bti file.""" - return _unpack_simple(fid, ">u2", np.uint16) + return _unpack_simple(fid, ">u2", np.uint32) def read_int16(fid): """Read 16bit integer from bti file.""" - return _unpack_simple(fid, ">i2", np.int16) + return _unpack_simple(fid, ">i2", np.int32) def read_uint32(fid): @@ -88,7 +88,13 @@ def read_double(fid): def read_int16_matrix(fid, rows, cols): """Read 16bit integer matrix from bti file.""" - return _unpack_matrix(fid, rows, cols, dtype=">i2", out_dtype=np.int16) + return _unpack_matrix( + fid, + rows, + cols, + dtype=">i2", + out_dtype=np.int32, + ) def read_float_matrix(fid, rows, cols): diff --git a/mne/io/ctf/res4.py b/mne/io/ctf/res4.py index b5c0f884c99..2ea2f619bcc 100644 --- a/mne/io/ctf/res4.py +++ b/mne/io/ctf/res4.py @@ -43,7 +43,7 @@ def _read_ustring(fid, n_bytes): def _read_int2(fid): """Read int from short.""" - return np.fromfile(fid, ">i2", 1)[0] + return _auto_cast(np.fromfile(fid, ">i2", 1)[0]) def _read_int(fid): @@ -208,6 +208,9 @@ def _read_res4(dsdir): coil["area"] *= 1e-4 # convert to dict chs = [dict(zip(chs.dtype.names, x)) for x in chs] + for ch in chs: + for key, val in ch.items(): + ch[key] = _auto_cast(val) res["chs"] = chs for k in range(res["nchan"]): res["chs"][k]["ch_name"] = res["ch_names"][k] @@ -216,3 +219,15 @@ def _read_res4(dsdir): _read_comp_coeff(fid, res) logger.info(" res4 data read.") return res + + +def _auto_cast(x): + # Upcast scalars + if isinstance(x, np.ScalarType): + if x.dtype.kind == "i": + if x.dtype != np.int64: + x = x.astype(np.int64) + elif x.dtype.kind == "f": + if x.dtype != np.float64: + x = x.astype(np.float64) + return x diff --git a/mne/io/edf/edf.py b/mne/io/edf/edf.py index 8831989860c..d27aabae8a5 100644 --- a/mne/io/edf/edf.py +++ 
b/mne/io/edf/edf.py @@ -1106,7 +1106,7 @@ def _read_gdf_header(fname, exclude, include=None): "Header information is incorrect for record length. " "Default record length set to 1." ) - nchan = np.fromfile(fid, UINT32, 1)[0] + nchan = int(np.fromfile(fid, UINT32, 1)[0]) channels = list(range(nchan)) ch_names = [_edf_str(fid.read(16)).strip() for ch in channels] exclude = _find_exclude_idx(ch_names, exclude, include) @@ -1177,7 +1177,7 @@ def _read_gdf_header(fname, exclude, include=None): fid.seek(etp) etmode = np.fromfile(fid, UINT8, 1)[0] if etmode in (1, 3): - sr = np.fromfile(fid, UINT8, 3) + sr = np.fromfile(fid, UINT8, 3).astype(np.uint32) event_sr = sr[0] for i in range(1, len(sr)): event_sr = event_sr + sr[i] * 2 ** (i * 8) @@ -1297,7 +1297,7 @@ def _read_gdf_header(fname, exclude, include=None): "Default record length set to 1." ) - nchan = np.fromfile(fid, UINT16, 1)[0] + nchan = int(np.fromfile(fid, UINT16, 1)[0]) fid.seek(2, 1) # 2bytes reserved # Channels (variable header) @@ -1443,7 +1443,7 @@ def _read_gdf_header(fname, exclude, include=None): else: chn = np.zeros(n_events, dtype=np.uint32) dur = np.ones(n_events, dtype=np.uint32) - np.clip(dur, 1, np.inf, out=dur) + np.maximum(dur, 1, out=dur) events = [n_events, pos, typ, chn, dur] edf_info["event_sfreq"] = event_sr @@ -1878,7 +1878,7 @@ def read_raw_gdf( input_fname = os.path.abspath(input_fname) ext = os.path.splitext(input_fname)[1][1:].lower() if ext != "gdf": - raise NotImplementedError(f"Only BDF files are supported, got {ext}.") + raise NotImplementedError(f"Only GDF files are supported, got {ext}.") return RawGDF( input_fname=input_fname, eog=eog, diff --git a/mne/io/eeglab/eeglab.py b/mne/io/eeglab/eeglab.py index 9e08807db49..6cf92bfd7bf 100644 --- a/mne/io/eeglab/eeglab.py +++ b/mne/io/eeglab/eeglab.py @@ -165,17 +165,23 @@ def _get_montage_information(eeg, get_pos, *, montage_units): ) lpa, rpa, nasion = None, None, None - if hasattr(eeg, "chaninfo") and 
len(eeg.chaninfo.get("nodatchans", [])): - for item in list(zip(*eeg.chaninfo["nodatchans"].values())): - d = dict(zip(eeg.chaninfo["nodatchans"].keys(), item)) - if d.get("type", None) != "FID": + if hasattr(eeg, "chaninfo") and isinstance(eeg.chaninfo["nodatchans"], dict): + nodatchans = eeg.chaninfo["nodatchans"] + types = nodatchans.get("type", []) + descriptions = nodatchans.get("description", []) + xs = nodatchans.get("X", []) + ys = nodatchans.get("Y", []) + zs = nodatchans.get("Z", []) + + for type_, description, x, y, z in zip(types, descriptions, xs, ys, zs): + if type_ != "FID": continue - elif d.get("description", None) == "Nasion": - nasion = np.array([d["X"], d["Y"], d["Z"]]) - elif d.get("description", None) == "Right periauricular point": - rpa = np.array([d["X"], d["Y"], d["Z"]]) - elif d.get("description", None) == "Left periauricular point": - lpa = np.array([d["X"], d["Y"], d["Z"]]) + if description == "Nasion": + nasion = np.array([x, y, z]) + elif description == "Right periauricular point": + rpa = np.array([x, y, z]) + elif description == "Left periauricular point": + lpa = np.array([x, y, z]) # Always check this even if it's not used _check_option("montage_units", montage_units, ("m", "dm", "cm", "mm", "auto")) diff --git a/mne/io/egi/egi.py b/mne/io/egi/egi.py index d6ba4b884f6..0bd669837a3 100644 --- a/mne/io/egi/egi.py +++ b/mne/io/egi/egi.py @@ -29,7 +29,7 @@ def _read_header(fid): ) def my_fread(*x, **y): - return np.fromfile(*x, **y)[0] + return int(np.fromfile(*x, **y)[0]) info = dict( version=version, @@ -57,8 +57,8 @@ def my_fread(*x, **y): dict( n_categories=0, n_segments=1, - n_samples=np.fromfile(fid, ">i4", 1)[0], - n_events=np.fromfile(fid, ">i2", 1)[0], + n_samples=int(np.fromfile(fid, ">i4", 1)[0]), + n_events=int(np.fromfile(fid, ">i2", 1)[0]), event_codes=[], category_names=[], category_lengths=[], diff --git a/mne/io/egi/egimff.py b/mne/io/egi/egimff.py index b584acf9abd..1120324c58a 100644 --- a/mne/io/egi/egimff.py +++ 
b/mne/io/egi/egimff.py @@ -79,7 +79,7 @@ def _read_mff_header(filepath): # by what we need to (e.g., a sample rate of 500 means we can multiply # by 1 and divide by 2 rather than multiplying by 500 and dividing by # 1000) - numerator = signal_blocks["sfreq"] + numerator = int(signal_blocks["sfreq"]) denominator = 1000 this_gcd = math.gcd(numerator, denominator) numerator = numerator // this_gcd @@ -507,7 +507,17 @@ def __init__( " Excluding events {%s} ..." % ", ".join([k for i, k in enumerate(event_codes) if i not in include_]) ) - events_ids = np.arange(len(include_)) + 1 + if all(ch.startswith("D") for ch in include_names): + # support the DIN format DIN1, DIN2, ..., DIN9, DI10, DI11, ... DI99, + # D100, D101, ..., D255 that we get when sending 0-255 triggers on a + # parallel port. + events_ids = list() + for ch in include_names: + while not ch[0].isnumeric(): + ch = ch[1:] + events_ids.append(int(ch)) + else: + events_ids = np.arange(len(include_)) + 1 egi_info["new_trigger"] = _combine_triggers( egi_events[include_], remapping=events_ids ) diff --git a/mne/io/egi/tests/test_egi.py b/mne/io/egi/tests/test_egi.py index 7772d38f2d3..d57cb27359c 100644 --- a/mne/io/egi/tests/test_egi.py +++ b/mne/io/egi/tests/test_egi.py @@ -190,9 +190,9 @@ def test_io_egi_mff(): read_raw_egi(egi_mff_fname, include=["Foo"]) with pytest.raises(ValueError, match="Could not find event"): read_raw_egi(egi_mff_fname, exclude=["Bar"]) - for ii, k in enumerate(include, 1): - assert k in raw.event_id - assert raw.event_id[k] == ii + for ch in include: + assert ch in raw.event_id + assert raw.event_id[ch] == int(ch[-1]) def test_io_egi(): diff --git a/mne/io/nihon/nihon.py b/mne/io/nihon/nihon.py index ab1e476fc5d..b6b7e3179ff 100644 --- a/mne/io/nihon/nihon.py +++ b/mne/io/nihon/nihon.py @@ -207,7 +207,7 @@ def _read_nihon_header(fname): t_datablock["address"] = t_data_address fid.seek(t_data_address + 0x26) - t_n_channels = np.fromfile(fid, np.uint8, 1)[0] + t_n_channels = 
np.fromfile(fid, np.uint8, 1)[0].astype(np.int64) t_datablock["n_channels"] = t_n_channels t_channels = [] @@ -219,14 +219,14 @@ def _read_nihon_header(fname): t_datablock["channels"] = t_channels fid.seek(t_data_address + 0x1C) - t_record_duration = np.fromfile(fid, np.uint32, 1)[0] + t_record_duration = np.fromfile(fid, np.uint32, 1)[0].astype(np.int64) t_datablock["duration"] = t_record_duration fid.seek(t_data_address + 0x1A) sfreq = np.fromfile(fid, np.uint16, 1)[0] & 0x3FFF - t_datablock["sfreq"] = sfreq + t_datablock["sfreq"] = sfreq.astype(np.int64) - t_datablock["n_samples"] = int(t_record_duration * sfreq / 10) + t_datablock["n_samples"] = np.int64(t_record_duration * sfreq // 10) t_controlblock["datablocks"].append(t_datablock) controlblocks.append(t_controlblock) header["controlblocks"] = controlblocks diff --git a/mne/io/nsx/nsx.py b/mne/io/nsx/nsx.py index 5d3b2e7a659..a74bcd05f30 100644 --- a/mne/io/nsx/nsx.py +++ b/mne/io/nsx/nsx.py @@ -365,7 +365,7 @@ def _get_hdr_info(fname, stim_channel=True, eog=None, misc=None): stim_channel_idxs, _ = _check_stim_channel(stim_channel, ch_names) - nchan = nsx_info["channel_count"] + nchan = int(nsx_info["channel_count"]) logger.info("Setting channel info structure...") chs = list() pick_mask = np.ones(len(ch_names)) diff --git a/mne/io/tests/test_raw.py b/mne/io/tests/test_raw.py index 362fb293fdf..5cc017588e3 100644 --- a/mne/io/tests/test_raw.py +++ b/mne/io/tests/test_raw.py @@ -334,7 +334,7 @@ def _test_raw_reader( assert meas_date is None or meas_date >= _stamp_to_dt((0, 0)) # test repr_html - assert "Good channels" in raw.info._repr_html_() + assert "Good channels" in raw._repr_html_() # test resetting raw if test_kwargs: diff --git a/mne/minimum_norm/tests/test_time_frequency.py b/mne/minimum_norm/tests/test_time_frequency.py index e581b4ae694..7072faeda9d 100644 --- a/mne/minimum_norm/tests/test_time_frequency.py +++ b/mne/minimum_norm/tests/test_time_frequency.py @@ -6,7 +6,7 @@ from mne._fiff.constants 
import FIFF from mne.datasets import testing from mne.io import read_raw_fif -from mne.label import read_label +from mne.label import BiHemiLabel, read_label from mne.minimum_norm import ( INVERSE_METHODS, apply_inverse_epochs, @@ -27,6 +27,7 @@ ) fname_data = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" fname_label = data_path / "MEG" / "sample" / "labels" / "Aud-lh.label" +fname_label2 = data_path / "MEG" / "sample" / "labels" / "Aud-rh.label" @testing.requires_testing_data @@ -129,9 +130,141 @@ def test_tfr_with_inverse_operator(method): method=method, prepared=True, ) + assert power.shape == phase_lock.shape assert np.all(phase_lock > 0) assert np.all(phase_lock <= 1) assert 5 < np.max(power) < 7 + # fairly precise spot check that our values match what we had on 2023/09/28 + if method != "eLORETA": + # check phase-lock using arbitrary index value since pl max is 1 + assert_allclose(phase_lock[1, 0, 0], 0.576, rtol=1e-3) + # check power + max_inds = np.unravel_index(np.argmax(power), power.shape) + assert_allclose(max_inds, [0, 11, 135]) + assert_allclose(power[max_inds], 6.05, rtol=1e-3) + + +@testing.requires_testing_data +def test_tfr_multi_label(): + """Test multi-label functionality.""" + tmin, tmax, event_id = -0.2, 0.5, 1 + + # Setup for reading the raw data + raw = read_raw_fif(fname_data) + events = find_events(raw, stim_channel="STI 014") + inv = read_inverse_operator(fname_inv) + inv = prepare_inverse_operator(inv, nave=1, lambda2=1.0 / 9.0, method="dSPM") + + raw.info["bads"] += ["MEG 2443", "EEG 053"] # bads + 2 more + + # picks MEG gradiometers + picks = pick_types( + raw.info, meg=True, eeg=False, eog=True, stim=False, exclude="bads" + ) + + # Load condition 1 + event_id = 1 + epochs = Epochs( + raw, + events[:3], # take 3 events to keep the computation time low + event_id, + tmin, + tmax, + picks=picks, + baseline=(None, 0), + reject=dict(grad=4000e-13, eog=150e-6), + preload=True, + ) + + freqs = np.arange(7, 30, 2) + + n_times = 
len(epochs.times) + n_freqs = len(freqs) + + # prepare labels + label = read_label(fname_label) # lh Aud + label2 = read_label(fname_label2) # rh Aud + labels = [label, label2] + bad_lab = label.copy() + bad_lab.vertices = np.hstack((label.vertices, [2121])) # add 1 unique vert + bad_lbls = [label, bad_lab] + nverts_lh = len(np.intersect1d(inv["src"][0]["vertno"], label.vertices)) + nverts_rh = len(np.intersect1d(inv["src"][1]["vertno"], label2.vertices)) + assert nverts_lh + 1 == nverts_rh == 3 + + # prepare instances of BiHemiLabel + fname_lvis = data_path / "MEG" / "sample" / "labels" / "Vis-lh.label" + fname_rvis = data_path / "MEG" / "sample" / "labels" / "Vis-rh.label" + lvis = read_label(fname_lvis) + rvis = read_label(fname_rvis) + bihl = BiHemiLabel(lh=label, rh=label2) # auditory labels + bihl.name = "Aud" + bihl2 = BiHemiLabel(lh=lvis, rh=rvis) # visual labels + bihl2.name = "Vis" + bihls = [bihl, bihl2] + bad_bihl = BiHemiLabel(lh=bad_lab, rh=rvis) # 1 unique vert on lh, rh ok + bad_bihls = [bihl, bad_bihl] + print("BiHemi label verts:", bihl.lh.vertices.shape, bihl.rh.vertices.shape) + + # check error handling + sip_kwargs = dict( + baseline=(-0.1, 0), + baseline_mode="mean", + n_cycles=2, + n_jobs=None, + return_plv=False, + method="dSPM", + prepared=True, + ) + # label input errors + with pytest.raises(TypeError, match="Label or BiHemi"): + source_induced_power(epochs, inv, freqs, label="bad_input", **sip_kwargs) + with pytest.raises(TypeError, match="Label or BiHemi"): + source_induced_power( + epochs, inv, freqs, label=[label, "bad_input"], **sip_kwargs + ) + + # error handling for multi-label and plv + sip_kwargs_bad = sip_kwargs.copy() + sip_kwargs_bad["return_plv"] = True + with pytest.raises(RuntimeError, match="value cannot be calculated"): + source_induced_power(epochs, inv, freqs, labels, **sip_kwargs_bad) + + # check multi-label handling + label_sets = dict(Label=(labels, bad_lbls), BiHemi=(bihls, bad_bihls)) + for ltype, lab_set in 
label_sets.items(): + n_verts = nverts_lh if ltype == "Label" else nverts_lh + nverts_rh + # check overlapping verts error handling + with pytest.raises(RuntimeError, match="overlapping vertices"): + source_induced_power(epochs, inv, freqs, lab_set[1], **sip_kwargs) + + # TODO someday, eliminate both levels of this nested for-loop and use + # pytest.mark.parametrize, but not unless/until the data IO and the loading / + # preparing of the inverse operator have been made into fixtures (the overhead + # of those operations makes it a bad idea to parametrize now) + for ori in (None, "normal"): # check loose and normal orientations + sip_kwargs.update(pick_ori=ori) + lbl = lab_set[0][0] + + # check label=Label vs label=[Label] + no_list_pow = source_induced_power( + epochs, inv, freqs, label=lbl, **sip_kwargs + ) + assert no_list_pow.shape == (n_verts, n_freqs, n_times) + + list_pow = source_induced_power( + epochs, inv, freqs, label=[lbl], **sip_kwargs + ) + assert list_pow.shape == (1, n_freqs, n_times) + + nlp_ave = np.mean(no_list_pow, axis=0) + assert_allclose(nlp_ave, list_pow[0], rtol=1e-3) + + # check label=[Label1, Label2] + multi_lab_pow = source_induced_power( + epochs, inv, freqs, label=lab_set[0], **sip_kwargs + ) + assert multi_lab_pow.shape == (2, n_freqs, n_times) @testing.requires_testing_data @@ -205,6 +338,7 @@ def test_source_psd_epochs(method): raw = read_raw_fif(fname_data) inverse_operator = read_inverse_operator(fname_inv) label = read_label(fname_label) + label2 = read_label(fname_label2) event_id, tmin, tmax = 1, -0.2, 0.5 lambda2 = 1.0 / 9.0 @@ -242,6 +376,7 @@ def test_source_psd_epochs(method): inv = prepare_inverse_operator( inverse_operator, nave=1, lambda2=1.0 / 9.0, method="dSPM" ) + # return list stc_psd = compute_source_psd_epochs( one_epochs, @@ -311,3 +446,7 @@ def test_source_psd_epochs(method): return_generator=False, prepared=True, ) + + # check error handling for label + with pytest.raises(TypeError, match="Label or BiHemi"): + 
compute_source_psd_epochs(one_epochs, inv, label=[label, label2]) diff --git a/mne/minimum_norm/time_frequency.py b/mne/minimum_norm/time_frequency.py index 3c037c67085..f9f5571ae9b 100644 --- a/mne/minimum_norm/time_frequency.py +++ b/mne/minimum_norm/time_frequency.py @@ -12,6 +12,7 @@ from ..event import make_fixed_length_events from ..evoked import EvokedArray from ..fixes import _safe_svd +from ..label import BiHemiLabel, Label from ..parallel import parallel_func from ..source_estimate import _make_stc from ..time_frequency.multitaper import ( @@ -21,7 +22,7 @@ _psd_from_mt_adaptive, ) from ..time_frequency.tfr import cwt, morlet -from ..utils import ProgressBar, _check_option, logger, verbose +from ..utils import ProgressBar, _check_option, _pl, _validate_type, logger, verbose from .inverse import ( INVERSE_METHODS, _assemble_kernel, @@ -33,6 +34,72 @@ ) +def _restrict_K_to_lbls(labels, K, noise_norm, vertno, pick_ori): + """Use labels to choose desired sources in the kernel.""" + verts_to_use = [[], []] + # create mask for K by compiling original vertices from vertno in labels + for ii in range(len(labels)): + lab = labels[ii] + # handle BiHemi labels; ok so long as no overlap w/ single hemi labels + if lab.hemi == "both": + l_verts = np.intersect1d(vertno[0], lab.lh.vertices) + r_verts = np.intersect1d(vertno[1], lab.rh.vertices) # output sorted + verts_to_use[0] += list(l_verts) + verts_to_use[1] += list(r_verts) + else: + hidx = 0 if lab.hemi == "lh" else 1 + verts = np.intersect1d(vertno[hidx], lab.vertices) + verts_to_use[hidx] += list(verts) + + # check that we don't have overlapping vertices in our labels + for ii in range(2): + if len(np.unique(verts_to_use[ii])) != len(verts_to_use[ii]): + raise RuntimeError( + "Labels cannot have overlapping vertices. " + "Please select labels with unique vertices " + "and try again." 
+ ) + + # turn original vertex numbers from vertno into indices for K + K_mask = np.searchsorted(vertno[0], verts_to_use[0]) + r_kmask = np.searchsorted(vertno[1], verts_to_use[1]) + len(vertno[0]) + K_mask = np.hstack((K_mask, r_kmask)) + + # record which original vertices are at each index in out_K + hemis = ("lh", "rh") + ki_keys = [ + (hemis[hi], verts_to_use[hi][ii]) + for hi in range(2) + for ii in range(len(verts_to_use[hi])) + ] + ki_vals = list(range(len(K_mask))) + k_idxs = dict(zip(ki_keys, ki_vals)) + + # mask K, handling the orientation issue + len_allverts = len(vertno[0]) + len(vertno[1]) + if len(K) == len_allverts: + assert pick_ori == "normal" + out_K = K[K_mask] + else: + # here, K = [x0, y0, z0, x1, y1, z1 ...] + # we need to drop x, y and z of unused vertices + assert not pick_ori == "normal", pick_ori + assert len(K) == 3 * len_allverts, (len(K), len_allverts) + out_len = len(K_mask) * 3 + out_K = K[0:out_len] # get the correct-shaped array + for di in range(3): + K_pick = K[di::3] + out_K[di::3] = K_pick[K_mask] # set correct values for out + + out_vertno = verts_to_use + if noise_norm is not None: + out_nn = noise_norm[K_mask] + else: + out_nn = None + + return out_K, out_nn, out_vertno, k_idxs + + def _prepare_source_params( inst, inverse_operator, @@ -64,9 +131,26 @@ def _prepare_source_params( # This does all the data transformations to compute the weights for the # eigenleads # - K, noise_norm, vertno, _ = _assemble_kernel( - inv, label, method, pick_ori, use_cps=use_cps - ) + # K shape: (3 x n_sources, n_channels) or (n_sources, n_channels) + # noise_norm shape: (n_sources, 1) + # vertno: [lh_verts, rh_verts] + + k_idxs = None + if not isinstance(label, (Label, BiHemiLabel)): + whole_K, whole_noise_norm, whole_vertno, _ = _assemble_kernel( + inv, None, method, pick_ori, use_cps=use_cps + ) + if isinstance(label, list): + K, noise_norm, vertno, k_idxs = _restrict_K_to_lbls( + label, whole_K, whole_noise_norm, whole_vertno, pick_ori + ) + 
else: + assert not label + K, noise_norm, vertno = whole_K, whole_noise_norm, whole_vertno + elif isinstance(label, (Label, BiHemiLabel)): + K, noise_norm, vertno, _ = _assemble_kernel( + inv, label, method, pick_ori, use_cps=use_cps + ) if pca: U, s, Vh = _safe_svd(K, full_matrices=False) @@ -78,7 +162,7 @@ def _prepare_source_params( Vh = None is_free_ori = inverse_operator["source_ori"] == FIFF.FIFFV_MNE_FREE_ORI - return K, sel, Vh, vertno, is_free_ori, noise_norm + return K, sel, Vh, vertno, is_free_ori, noise_norm, k_idxs @verbose @@ -114,8 +198,9 @@ def source_band_induced_power( The inverse operator. bands : dict Example : bands = dict(alpha=[8, 9]). - label : Label - Restricts the source estimates to a given label. + label : Label | list of Label + Restricts the source estimates to a given label or list of labels. If + labels are provided in a list, power will be averaged over vertices. lambda2 : float The regularization parameter of the minimum norm. method : "MNE" | "dSPM" | "sLORETA" | "eLORETA" @@ -170,7 +255,10 @@ def source_band_induced_power( Returns ------- stcs : dict of SourceEstimate (or VolSourceEstimate) - The estimated source space induced power estimates. + The estimated source space induced power estimates in shape + (n_vertices, n_frequencies, n_samples) if label=None or label=label. + For lists of one or more labels, the induced power estimate has shape + (n_labels, n_frequencies, n_samples). 
""" # noqa: E501 _check_option("method", method, INVERSE_METHODS) @@ -262,6 +350,7 @@ def _compute_pow_plv( with_plv, pick_ori, decim, + noise_norm=None, verbose=None, ): """Aux function for induced power and PLV.""" @@ -292,6 +381,9 @@ def _compute_pow_plv( if with_plv: plv += plv_e + if noise_norm is not None: + power *= noise_norm[:, :, np.newaxis] ** 2 + return power, plv @@ -345,6 +437,41 @@ def _single_epoch_tfr( return tfr_e, plv_e +def _get_label_power(power, labels, vertno, k_idxs): + """Average power across vertices in labels.""" + (_, ps1, ps2) = power.shape + # construct out array with correct shape + out_power = np.zeros(shape=(len(labels), ps1, ps2)) + + # for each label, compile list of vertices we want + for li in np.arange(len(labels)): + lab = labels[li] + hemis = ("lh", "rh") + all_vnums = [[], []] + if lab.hemi == "both": + all_vnums[0] = np.intersect1d(lab.lh.vertices, vertno[0]) + all_vnums[1] = np.intersect1d(lab.rh.vertices, vertno[1]) + else: + assert lab.hemi == "lh" or lab.hemi == "rh" + h_id = 0 if lab.hemi == "lh" else 1 + all_vnums[h_id] = np.intersect1d(vertno[h_id], lab.vertices) + + verts = [(hemis[hi], vn) for hi in range(2) for vn in all_vnums[hi]] + + # restrict power to relevant vertices in label + lab_mask = np.array([False] * len(power)) + for vert in verts: + lab_mask[k_idxs[vert]] = True # k_idxs[vert] gives power row index + lab_power = power[lab_mask] # only pass through rows we want + assert lab_power.shape == (len(verts), ps1, ps2) + + # set correct out values for label + out_power[li, :, :] = np.mean(lab_power, axis=0) + + assert out_power.shape == (len(labels), ps1, ps2) + return out_power + + @verbose def _source_induced_power( epochs, @@ -368,8 +495,29 @@ def _source_induced_power( verbose=None, ): """Aux function for source induced power.""" + if label: + _validate_type( + label, + types=(Label, BiHemiLabel, list, tuple, None), + type_name=("Label or BiHemiLabel", "list of labels", "None"), + ) + if 
isinstance(label, (list, tuple)): + for item in label: + _validate_type( + item, + types=(Label, BiHemiLabel), + type_name=("Label or BiHemiLabel"), + ) + if len(label) > 1 and with_plv: + raise RuntimeError( + "Phase-locking value cannot be calculated " + "when averaging induced power within " + "labels. Please set `with_plv` to False, pass a " + "single `label=label`, or set `label=None`." + ) + epochs_data = epochs.get_data() - K, sel, Vh, vertno, is_free_ori, noise_norm = _prepare_source_params( + K, sel, Vh, vertno, is_free_ori, noise_norm, k_id = _prepare_source_params( inst=epochs, inverse_operator=inverse_operator, label=label, @@ -406,12 +554,26 @@ def _source_induced_power( with_power=True, pick_ori=pick_ori, decim=decim, + noise_norm=noise_norm, ) for data in np.array_split(epochs_data, n_jobs) ) - power = sum(o[0] for o in out) + power = sum(o[0] for o in out) # power shape: (n_verts, n_freqs, n_samps) power /= len(epochs_data) # average power over epochs + if isinstance(label, (Label, BiHemiLabel)): + logger.info( + f"Outputting power for {len(power)} vertices in label {label.name}." + ) + elif isinstance(label, list): + power = _get_label_power(power, label, vertno, k_id) + logger.info( + "Averaging induced power across vertices within labels " + f"for {len(label)} label{_pl(label)}." + ) + else: + assert not label + if with_plv: plv = sum(o[1] for o in out) plv = np.abs(plv) @@ -419,9 +581,6 @@ def _source_induced_power( else: plv = None - if noise_norm is not None: - power *= noise_norm[:, :, np.newaxis] ** 2 - return power, plv, vertno @@ -442,6 +601,8 @@ def source_induced_power( baseline_mode="logratio", pca=True, n_jobs=None, + *, + return_plv=True, zero_mean=False, prepared=False, method_params=None, @@ -460,8 +621,10 @@ def source_induced_power( The inverse operator. freqs : array Array of frequencies of interest. - label : Label - Restricts the source estimates to a given label. 
+ label : Label | list of Label + Restricts the source estimates to a given label or list of labels. If + labels are provided in a list, power will be averaged over vertices within each + label. lambda2 : float The regularization parameter of the minimum norm. method : "MNE" | "dSPM" | "sLORETA" | "eLORETA" @@ -506,6 +669,10 @@ def source_induced_power( the time-frequency transforms. It reduces the computation times e.g. with a dataset that was maxfiltered (true dim is 64). %(n_jobs)s + return_plv : bool + If True, return the phase-locking value array. Else, only return power. + + .. versionadded:: 1.6 zero_mean : bool Make sure the wavelets are zero mean. prepared : bool @@ -520,7 +687,12 @@ def source_induced_power( Returns ------- power : array - The induced power. + The induced power array with shape (n_sources, n_freqs, n_samples) if + label=None or label=label. For lists of one or more labels, the induced + power estimate has shape (n_labels, n_frequencies, n_samples). + plv : array + The phase-locking value array with shape (n_sources, n_freqs, + n_samples). Only returned if ``return_plv=True``. """ # noqa: E501 _check_option("method", method, INVERSE_METHODS) _check_ori(pick_ori, inverse_operator["source_ori"], inverse_operator["src"]) @@ -539,6 +711,7 @@ def source_induced_power( pick_ori=pick_ori, pca=pca, n_jobs=n_jobs, + with_plv=return_plv, method_params=method_params, zero_mean=zero_mean, prepared=prepared, @@ -547,7 +720,9 @@ def source_induced_power( # Run baseline correction power = rescale(power, epochs.times[::decim], baseline, baseline_mode, copy=False) - return power, plv + + outs = (power, plv) if return_plv else power + return outs @verbose @@ -761,7 +936,17 @@ def _compute_source_psd_epochs( """Generate compute_source_psd_epochs.""" logger.info("Considering frequencies %g ... 
%g Hz" % (fmin, fmax)) - K, sel, Vh, vertno, is_free_ori, noise_norm = _prepare_source_params( + if label: + # TODO: add multi-label support + # since `_prepare_source_params` can handle a list of labels now, + # multi-label support should be within reach for psd calc as well + _validate_type( + label, + types=(Label, BiHemiLabel, None), + type_name=("Label or BiHemiLabel", "None"), + ) + + K, sel, Vh, vertno, is_free_ori, noise_norm, _ = _prepare_source_params( inst=epochs, inverse_operator=inverse_operator, label=label, diff --git a/mne/preprocessing/realign.py b/mne/preprocessing/realign.py index 396e4ba33e6..09442ca9b1c 100644 --- a/mne/preprocessing/realign.py +++ b/mne/preprocessing/realign.py @@ -28,8 +28,9 @@ def realign_raw(raw, other, t_raw, t_other, verbose=None): The second raw instance. It will be resampled to match ``raw``. t_raw : array-like, shape (n_events,) The times of shared events in ``raw`` relative to ``raw.times[0]`` (0). - Typically these could be events on some TTL channel like - ``find_events(raw)[:, 0] - raw.first_samp``. + Typically these could be events on some TTL channel such as:: + + find_events(raw)[:, 0] / raw.info["sfreq"] - raw.first_time t_other : array-like, shape (n_events,) The times of shared events in ``other`` relative to ``other.times[0]``. %(verbose)s @@ -92,11 +93,11 @@ def realign_raw(raw, other, t_raw, t_other, verbose=None): logger.info(f"Cropping {zero_ord:0.3f} s from the start of raw") raw.crop(zero_ord, None) t_raw -= zero_ord - else: # need to crop start of other to match raw - t_crop = zero_ord / first_ord + elif zero_ord < 0: # need to crop start of other to match raw + t_crop = -zero_ord / first_ord logger.info(f"Cropping {t_crop:0.3f} s from the start of other") - other.crop(-t_crop, None) - t_other += t_crop + other.crop(t_crop, None) + t_other -= t_crop # 3. 
Resample data using the first-order term nan_ch_names = [ diff --git a/mne/report/report.py b/mne/report/report.py index d1138e1e610..18ca4be3768 100644 --- a/mne/report/report.py +++ b/mne/report/report.py @@ -587,9 +587,6 @@ def _plot_ica_properties_as_arrays(*, ica, inst, picks, n_jobs): """ import matplotlib.pyplot as plt - if picks is None: - picks = list(range(ica.n_components_)) - def _plot_one_ica_property(*, ica, inst, pick): figs = ica.plot_properties(inst=inst, picks=pick, show=False) assert len(figs) == 1 @@ -1666,27 +1663,17 @@ def _add_ica_properties( ) return + if picks is None: + picks = list(range(ica.n_components_)) + figs = _plot_ica_properties_as_arrays( ica=ica, inst=inst, picks=picks, n_jobs=n_jobs ) - rel_explained_var = ( - ica.pca_explained_variance_ / ica.pca_explained_variance_.sum() - ) - cum_explained_var = np.cumsum(rel_explained_var) - captions = [] - for idx, rel_var, cum_var in zip( - range(len(figs)), - rel_explained_var[: len(figs)], - cum_explained_var[: len(figs)], - ): - caption = ( - f"ICA component {idx}. " f"Variance explained: {round(100 * rel_var)}%" - ) - if idx == 0: - caption += "." - else: - caption += f" ({round(100 * cum_var)}% cumulative)." + assert len(figs) == len(picks) + captions = [] + for idx in range(len(figs)): + caption = f"ICA component {picks[idx]}." 
captions.append(caption) title = "ICA component properties" diff --git a/mne/report/tests/test_report.py b/mne/report/tests/test_report.py index 7577774e313..67b065ad4fe 100644 --- a/mne/report/tests/test_report.py +++ b/mne/report/tests/test_report.py @@ -877,6 +877,8 @@ def test_survive_pickle(tmp_path): def test_manual_report_2d(tmp_path, invisible_fig): """Simulate user manually creating report by adding one file at a time.""" pytest.importorskip("sklearn") + pytest.importorskip("pandas") + from sklearn.exceptions import ConvergenceWarning r = Report(title="My Report") @@ -911,10 +913,10 @@ def test_manual_report_2d(tmp_path, invisible_fig): evoked = evokeds[0].pick("eeg") with pytest.warns(ConvergenceWarning, match="did not converge"): - ica = ICA(n_components=2, max_iter=1, random_state=42).fit( + ica = ICA(n_components=3, max_iter=1, random_state=42).fit( inst=raw.copy().crop(tmax=1) ) - ica_ecg_scores = ica_eog_scores = np.array([3, 0]) + ica_ecg_scores = ica_eog_scores = np.array([3, 0, 0]) ica_ecg_evoked = ica_eog_evoked = epochs_without_metadata.average() r.add_raw(raw=raw, title="my raw data", tags=("raw",), psd=True, projs=False) @@ -967,12 +969,13 @@ def test_manual_report_2d(tmp_path, invisible_fig): ica=ica, title="my ica with raw inst", inst=raw.copy().load_data(), - picks=[0], + picks=[2], ecg_evoked=ica_ecg_evoked, eog_evoked=ica_eog_evoked, ecg_scores=ica_ecg_scores, eog_scores=ica_eog_scores, ) + assert "ICA component 2" in r._content[-1].html epochs_baseline = epochs_without_metadata.copy().load_data() epochs_baseline.apply_baseline((None, 0)) r.add_ica( @@ -981,6 +984,7 @@ def test_manual_report_2d(tmp_path, invisible_fig): inst=epochs_baseline, picks=[0], ) + r.add_ica(ica=ica, title="my ica with picks=None", inst=epochs_baseline, picks=None) r.add_covariance(cov=cov, info=raw_fname, title="my cov") r.add_forward( forward=fwd_fname, diff --git a/mne/source_space/tests/test_source_space.py b/mne/source_space/tests/test_source_space.py index 
a0fe8dde4a1..afccc567074 100644 --- a/mne/source_space/tests/test_source_space.py +++ b/mne/source_space/tests/test_source_space.py @@ -386,6 +386,7 @@ def test_other_volume_source_spaces(tmp_path): """Test setting up other volume source spaces.""" # these are split off because they require the MNE tools, and # Travis doesn't seem to like them + pytest.importorskip("nibabel") # let's try the spherical one (no bem or surf supplied) temp_name = tmp_path / "temp-src.fif" @@ -562,6 +563,7 @@ def test_setup_source_space(tmp_path): @pytest.mark.parametrize("spacing", [2, 7]) def test_setup_source_space_spacing(tmp_path, spacing, monkeypatch): """Test setting up surface source spaces using a given spacing.""" + pytest.importorskip("nibabel") copytree(subjects_dir / "sample", tmp_path / "sample") args = [] if spacing == 7 else ["--spacing", str(spacing)] monkeypatch.setenv("SUBJECTS_DIR", str(tmp_path)) diff --git a/mne/surface.py b/mne/surface.py index b042361305a..d0aac3abe0d 100644 --- a/mne/surface.py +++ b/mne/surface.py @@ -772,7 +772,7 @@ def _call_old(self, rr, n_jobs): def _fread3(fobj): """Read 3 bytes and adjust.""" - b1, b2, b3 = np.fromfile(fobj, ">u1", 3) + b1, b2, b3 = np.fromfile(fobj, ">u1", 3).astype(np.int64) return (b1 << 16) + (b2 << 8) + b3 diff --git a/mne/tests/test_dipole.py b/mne/tests/test_dipole.py index c0079001528..ccfe0cb3241 100644 --- a/mne/tests/test_dipole.py +++ b/mne/tests/test_dipole.py @@ -101,6 +101,7 @@ def test_io_dipoles(tmp_path): @testing.requires_testing_data def test_dipole_fitting_ctf(): """Test dipole fitting with CTF data.""" + pytest.importorskip("nibabel") raw_ctf = read_raw_ctf(fname_ctf).set_eeg_reference(projection=True) events = make_fixed_length_events(raw_ctf, 1) evoked = Epochs(raw_ctf, events, 1, 0, 0, baseline=None).average() @@ -125,6 +126,7 @@ def test_dipole_fitting_ctf(): @requires_mne def test_dipole_fitting(tmp_path): """Test dipole fitting.""" + pytest.importorskip("nibabel") amp = 100e-9 rng = 
np.random.RandomState(0) fname_dtemp = tmp_path / "test.dip" diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py index 9f72be1803a..423fe556365 100644 --- a/mne/tests/test_epochs.py +++ b/mne/tests/test_epochs.py @@ -3914,29 +3914,36 @@ def assert_metadata_equal(got, exp): @pytest.mark.parametrize( - ("all_event_id", "row_events", "keep_first", "keep_last"), + ("all_event_id", "row_events", "tmin", "tmax", "keep_first", "keep_last"), [ ( {"a/1": 1, "a/2": 2, "b/1": 3, "b/2": 4, "c": 32}, # all events None, + -0.5, + 1.5, None, None, ), - ({"a/1": 1, "a/2": 2}, None, None, None), # subset of events - (dict(), None, None, None), # empty set of events + ({"a/1": 1, "a/2": 2}, None, -0.5, 1.5, None, None), # subset of events + (dict(), None, -0.5, 1.5, None, None), # empty set of events ( {"a/1": 1, "a/2": 2, "b/1": 3, "b/2": 4, "c": 32}, ("a/1", "a/2", "b/1", "b/2"), + -0.5, + 1.5, ("a", "b"), "c", ), + # Test when tmin, tmax are None + ({"a/1": 1, "a/2": 2}, None, None, 1.5, None, None), # tmin is None + ({"a/1": 1, "a/2": 2}, None, -0.5, None, None, None), # tmax is None + ({"a/1": 1, "a/2": 2}, None, None, None, None, None), # tmin and tmax are None ], ) -def test_make_metadata(all_event_id, row_events, keep_first, keep_last): +def test_make_metadata(all_event_id, row_events, tmin, tmax, keep_first, keep_last): """Test that make_metadata works.""" pytest.importorskip("pandas") raw, all_events, _ = _get_data() - tmin, tmax = -0.5, 1.5 sfreq = raw.info["sfreq"] kwargs = dict( events=all_events, @@ -4005,6 +4012,80 @@ def test_make_metadata(all_event_id, row_events, keep_first, keep_last): Epochs(raw, events=events, event_id=event_id, metadata=metadata, verbose="warning") +def test_make_metadata_bounded_by_row_events(): + """Test make_metadata() with tmin, tmax set to None.""" + pytest.importorskip("pandas") + + sfreq = 100 + duration = 15 + n_chs = 10 + + # Define events and generate annotations + experimental_events = [ + # Beginning of recording until 
response (1st trial) + {"onset": 0.0, "description": "rec_start", "duration": 1 / sfreq}, + {"onset": 1.0, "description": "cue", "duration": 1 / sfreq}, + {"onset": 2.0, "description": "stim", "duration": 1 / sfreq}, + {"onset": 2.5, "description": "resp", "duration": 1 / sfreq}, + # 2nd trial + {"onset": 4.0, "description": "cue", "duration": 1 / sfreq}, + {"onset": 4.3, "description": "stim", "duration": 1 / sfreq}, + {"onset": 8.0, "description": "resp", "duration": 1 / sfreq}, + # 3rd trial until end of the recording + {"onset": 10.0, "description": "cue", "duration": 1 / sfreq}, + {"onset": 12.0, "description": "stim", "duration": 1 / sfreq}, + {"onset": 13.0, "description": "resp", "duration": 1 / sfreq}, + {"onset": 14.9, "description": "rec_end", "duration": 1 / sfreq}, + ] + + annots = mne.Annotations( + onset=[e["onset"] for e in experimental_events], + description=[e["description"] for e in experimental_events], + duration=[e["duration"] for e in experimental_events], + ) + + # Generate raw data, attach the annotations, and convert to events + rng = np.random.default_rng() + data = 1e-5 * rng.standard_normal((n_chs, sfreq * duration)) + info = mne.create_info( + ch_names=[f"EEG {i}" for i in range(n_chs)], sfreq=sfreq, ch_types="eeg" + ) + + raw = mne.io.RawArray(data=data, info=info) + raw.set_annotations(annots) + events, event_id = mne.events_from_annotations(raw=raw) + + metadata, events_new, event_id_new = mne.epochs.make_metadata( + events=events, + event_id=event_id, + tmin=None, + tmax=None, + sfreq=raw.info["sfreq"], + row_events="cue", + ) + + # We should have 3 rows in the metadata table in total. 
+ # rec_start occurred before the first row_event, so should not be included + # rec_end occurred after the last row_event and should be included + + assert len(metadata) == 3 + assert (metadata["event_name"] == "cue").all() + assert (metadata["cue"] == 0.0).all() + + for row in metadata.itertuples(): + assert row.cue < row.stim < row.resp + assert np.isnan(row.rec_start) + + # Beginning of recording until end of 1st trial + assert np.isnan(metadata.iloc[0]["rec_end"]) + + # 2nd trial + assert np.isnan(metadata.iloc[1]["rec_end"]) + + # 3rd trial until end of the recording + assert metadata.iloc[2]["resp"] < metadata.iloc[2]["rec_end"] + + def test_events_list(): """Test that events can be a list.""" events = [[100, 0, 1], [200, 0, 1], [300, 0, 1]] diff --git a/mne/tests/test_surface.py b/mne/tests/test_surface.py index 60b9fed5a17..3513a32bb32 100644 --- a/mne/tests/test_surface.py +++ b/mne/tests/test_surface.py @@ -196,6 +196,7 @@ def test_decimate_surface_vtk(n_tri): @requires_freesurfer("mris_sphere") def test_decimate_surface_sphere(): """Test sphere mode of decimation.""" + pytest.importorskip("nibabel") rr, tris = _tessellate_sphere(3) assert len(rr) == 66 assert len(tris) == 128 diff --git a/mne/time_frequency/tests/test_spectrum.py b/mne/time_frequency/tests/test_spectrum.py index 7aaa5b40ea6..96fe89a2e6d 100644 --- a/mne/time_frequency/tests/test_spectrum.py +++ b/mne/time_frequency/tests/test_spectrum.py @@ -1,9 +1,9 @@ from contextlib import nullcontext from functools import partial -import matplotlib.pyplot as plt import numpy as np import pytest +from matplotlib.colors import same_color from numpy.testing import assert_allclose, assert_array_equal from mne import Annotations, create_info, make_fixed_length_epochs @@ -449,8 +449,16 @@ def test_plot_spectrum(kind, array, request): data, freqs = spectrum.get_data(return_freqs=True) Klass = SpectrumArray if kind == "raw" else EpochsSpectrumArray spectrum = Klass(data=data, info=spectrum.info, 
freqs=freqs) + spectrum.info["bads"] = spectrum.ch_names[:1] # one grad channel spectrum.plot(average=True, amplitude=True, spatial_colors=True) - spectrum.plot(average=False, amplitude=False, spatial_colors=False) + spectrum.plot(average=True, amplitude=False, spatial_colors=False) + n_grad = sum(ch_type == "grad" for ch_type in spectrum.get_channel_types()) + for amp, sc in ((True, True), (False, False)): + fig = spectrum.plot(average=False, amplitude=amp, spatial_colors=sc, exclude=()) + lines = fig.axes[0].lines[2:] # grads, ignore two vlines + assert len(lines) == n_grad + bad_color = "0.5" if sc else "r" + n_bad = sum(same_color(line.get_color(), bad_color) for line in lines) + assert n_bad == 1 spectrum.plot_topo() spectrum.plot_topomap() - plt.close("all") diff --git a/mne/utils/check.py b/mne/utils/check.py index a26495106e4..2faa364b779 100644 --- a/mne/utils/check.py +++ b/mne/utils/check.py @@ -8,7 +8,6 @@ import os import re from builtins import input # no-op here but facilitates testing -from collections.abc import Sequence from difflib import get_close_matches from importlib import import_module from importlib.metadata import version @@ -542,7 +541,7 @@ def __instancecheck__(cls, other): "path-like": path_like, "int-like": (int_like,), "callable": (_Callable(),), - "array-like": (Sequence, np.ndarray), + "array-like": (list, tuple, set, np.ndarray), } diff --git a/mne/utils/config.py b/mne/utils/config.py index 3c29b8d3b50..220ee49ed32 100644 --- a/mne/utils/config.py +++ b/mne/utils/config.py @@ -14,9 +14,13 @@ import subprocess import sys import tempfile -from functools import partial +from functools import lru_cache, partial from importlib import import_module from pathlib import Path +from urllib.error import URLError +from urllib.request import urlopen + +from packaging.version import parse from ._logging import logger, warn from .check import _check_fname, _check_option, _check_qt_version, _validate_type @@ -206,6 +210,7 @@ def 
set_memmap_min_size(memmap_min_size): "MNE_DATASETS_FNIRS", # mne-nirs "MNE_NIRS", # mne-nirs "MNE_KIT2FIFF", # mne-kit-gui + "MNE_ICALABEL", # mne-icalabel ) @@ -568,6 +573,7 @@ def _get_numpy_libs(): print(gi.renderer)""" +@lru_cache(maxsize=1) def _get_gpu_info(): # Once https://github.com/pyvista/pyvista/pull/2250 is merged and PyVista # does a release, we can triage based on version > 0.33.2 @@ -580,7 +586,14 @@ def _get_gpu_info(): return out -def sys_info(fid=None, show_paths=False, *, dependencies="user", unicode=True): +def sys_info( + fid=None, + show_paths=False, + *, + dependencies="user", + unicode=True, + check_version=True, +): """Print system information. This function prints system information useful when triaging bugs. @@ -599,9 +612,16 @@ def sys_info(fid=None, show_paths=False, *, dependencies="user", unicode=True): Include Unicode symbols in output. .. versionadded:: 0.24 + check_version : bool | float + If True (default), attempt to check that the version of MNE-Python is up to date + with the latest release on GitHub. Can be a float to give a different timeout + (in sec) from the default (2 sec). + + .. 
versionadded:: 1.6 """ _validate_type(dependencies, str) _check_option("dependencies", dependencies, ("user", "developer")) + _validate_type(check_version, (bool, "numeric"), "check_version") ljust = 24 if dependencies == "developer" else 21 platform_str = platform.platform() if platform.system() == "Darwin" and sys.version_info[:2] < (3, 8): @@ -694,6 +714,7 @@ def sys_info(fid=None, show_paths=False, *, dependencies="user", unicode=True): unicode = unicode and (sys.stdout.encoding.lower().startswith("utf")) except Exception: # in case someone overrides sys.stdout in an unsafe way unicode = False + mne_version_good = True for mi, mod_name in enumerate(use_mod_names): # upcoming break if mod_name == "": # break @@ -718,7 +739,16 @@ def sys_info(fid=None, show_paths=False, *, dependencies="user", unicode=True): except Exception: unavailable.append(mod_name) else: - out(f"{pre}☑ " if unicode else " + ") + mark = "☑" if unicode else "+" + mne_extra = "" + if mod_name == "mne" and check_version: + timeout = 2.0 if check_version is True else float(check_version) + mne_version_good, mne_extra = _check_mne_version(timeout) + if mne_version_good is None: + mne_version_good = True + elif not mne_version_good: + mark = "☒" if unicode else "X" + out(f"{pre}{mark} " if unicode else f" {mark} ") out(f"{mod_name}".ljust(ljust)) if mod_name == "vtk": vtk_version = mod.vtkVersion() @@ -746,6 +776,9 @@ def sys_info(fid=None, show_paths=False, *, dependencies="user", unicode=True): out(" (OpenGL unavailable)") else: out(f" (OpenGL {version} via {renderer})") + elif mod_name == "mne": + out(f" ({mne_extra})") + # Now comes stuff after the version if show_paths: if last: pre = " " @@ -755,3 +788,42 @@ def sys_info(fid=None, show_paths=False, *, dependencies="user", unicode=True): pre = " | " out(f'\n{pre}{" " * ljust}{op.dirname(mod.__file__)}') out("\n") + + if not mne_version_good: + out( + "\nTo update to the latest supported release version to get bugfixes and " + "improvements, 
visit " + "https://mne.tools/stable/install/updating.html\n" + ) + + +def _get_latest_version(timeout): + # Bandit complains about urlopen, but we know the URL here + url = "https://api.github.com/repos/mne-tools/mne-python/releases/latest" + try: + with urlopen(url, timeout=timeout) as f: # nosec + response = json.load(f) + except URLError as err: + # Triage error type + if "SSL" in str(err): + return "SSL error" + elif "timed out" in str(err): + return f"timeout after {timeout} sec" + else: + return f"unknown error: {str(err)}" + else: + return response["tag_name"].lstrip("v") or "version unknown" + + +def _check_mne_version(timeout): + rel_ver = _get_latest_version(timeout) + if not rel_ver[0].isnumeric(): + return None, (f"unable to check for latest version on GitHub, {rel_ver}") + rel_ver = parse(rel_ver) + this_ver = parse(import_module("mne").__version__) + if this_ver > rel_ver: + return True, f"devel, latest release is {rel_ver}" + if this_ver == rel_ver: + return True, "latest release" + else: + return False, f"outdated, release {rel_ver} is available!" 
diff --git a/mne/utils/tests/test_check.py b/mne/utils/tests/test_check.py index 2856d9ea37b..2cda3188cd8 100644 --- a/mne/utils/tests/test_check.py +++ b/mne/utils/tests/test_check.py @@ -212,6 +212,11 @@ def test_validate_type(): _validate_type(1, "int-like") with pytest.raises(TypeError, match="int-like"): _validate_type(False, "int-like") + _validate_type([1, 2, 3], "array-like") + _validate_type((1, 2, 3), "array-like") + _validate_type({1, 2, 3}, "array-like") + with pytest.raises(TypeError, match="array-like"): + _validate_type("123", "array-like") # a string is not array-like def test_check_range(): diff --git a/mne/utils/tests/test_config.py b/mne/utils/tests/test_config.py index d29aa43feda..fe802734f67 100644 --- a/mne/utils/tests/test_config.py +++ b/mne/utils/tests/test_config.py @@ -1,15 +1,21 @@ import os import platform +import re +from functools import partial from pathlib import Path +from urllib.error import URLError import pytest +import mne +import mne.utils.config from mne.utils import ( ClosingStringIO, _get_stim_channel, get_config, get_config_path, get_subjects_dir, + requires_good_network, set_config, set_memmap_min_size, sys_info, @@ -95,7 +101,7 @@ def test_config(tmp_path): def test_sys_info(): """Test info-showing utility.""" out = ClosingStringIO() - sys_info(fid=out) + sys_info(fid=out, check_version=False) out = out.getvalue() assert "numpy" in out @@ -109,7 +115,7 @@ def test_sys_info_qt_browser(): """Test if mne_qt_browser is correctly detected.""" pytest.importorskip("mne_qt_browser") out = ClosingStringIO() - sys_info(fid=out) + sys_info(fid=out, check_version=False) out = out.getvalue() assert "mne-qt-browser" in out @@ -134,3 +140,67 @@ def test_get_subjects_dir(tmp_path, monkeypatch): monkeypatch.setenv("HOME", str(tmp_path)) monkeypatch.setenv("USERPROFILE", str(tmp_path)) # Windows assert str(get_subjects_dir("~/foo")) == str(subjects_dir) + + +@pytest.mark.slowtest +@requires_good_network +def 
test_sys_info_check_outdated(monkeypatch): + """Test sys info checking.""" + # Old (actually ping GitHub) + monkeypatch.setattr(mne, "__version__", "0.1") + out = ClosingStringIO() + sys_info(fid=out, check_version=10) + out = out.getvalue() + assert "(outdated, release " in out + assert "updating.html" in out + + # Timeout (will call urllib.open) + out = ClosingStringIO() + sys_info(fid=out, check_version=1e-12) + out = out.getvalue() + assert re.match(".*unable to check.*timeout.*", out, re.DOTALL) is not None + assert "updating.html" not in out + + +def test_sys_info_check_other(monkeypatch): + """Test other failure modes of the sys info check.""" + + def bad_open(url, timeout, msg): + raise URLError(msg) + + # SSL error + out = ClosingStringIO() + with monkeypatch.context() as m: + m.setattr(mne.utils.config, "urlopen", partial(bad_open, msg="SSL: CERT")) + sys_info(fid=out) + out = out.getvalue() + assert re.match(".*unable to check.*SSL.*", out, re.DOTALL) is not None + + # Other error + out = ClosingStringIO() + with monkeypatch.context() as m: + m.setattr(mne.utils.config, "urlopen", partial(bad_open, msg="foo bar")) + sys_info(fid=out) + out = out.getvalue() + match = re.match(".*unable to .*unknown error: .*foo bar.*", out, re.DOTALL) + assert match is not None + + # Match + monkeypatch.setattr( + mne.utils.config, + "_get_latest_version", + lambda timeout: "1.5.1", + ) + monkeypatch.setattr(mne, "__version__", "1.5.1") + out = ClosingStringIO() + sys_info(fid=out) + out = out.getvalue() + assert " 1.5.1 (latest release)" in out + + # Devel + monkeypatch.setattr(mne, "__version__", "1.6.dev0") + out = ClosingStringIO() + sys_info(fid=out) + out = out.getvalue() + assert "devel, " in out + assert "updating.html" not in out diff --git a/mne/utils/tests/test_misc.py b/mne/utils/tests/test_misc.py index aca5efa5fbc..6892d561777 100644 --- a/mne/utils/tests/test_misc.py +++ b/mne/utils/tests/test_misc.py @@ -24,8 +24,8 @@ def test_html_repr(): os.environ[key] 
= "True" # HTML repr on info = mne.create_info(10, 256) r = info._repr_html_() - assert r.startswith("") + assert r.startswith("
") os.environ[key] = "False" # HTML repr off r = info._repr_html_() assert r.startswith("
")
diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py
index 14edb396edf..ece5798b582 100644
--- a/mne/viz/_3d.py
+++ b/mne/viz/_3d.py
@@ -1252,10 +1252,9 @@ def _orient_glyphs(
     proj_pts, proj_nn = _get_nearest(nearest, check_inside, project_to_trans, proj_rr)
     vec = pts - proj_pts  # point to the surface
     nn = proj_nn
+    scalars = np.ones(len(pts))
     if mark_inside and not project_to_surface:
-        scalars = (~check_inside(proj_rr)).astype(int)
-    else:
-        scalars = np.ones(len(pts))
+        scalars[:] = ~check_inside(proj_rr)
     dist = np.linalg.norm(vec, axis=-1, keepdims=True)
     vectors = (250 * dist + 1) * nn
     return scalars, vectors, proj_pts
@@ -1277,28 +1276,16 @@ def _plot_glyphs(
     check_inside=None,
     nearest=None,
 ):
+    from matplotlib.colors import ListedColormap, to_rgba
+
+    _validate_type(mark_inside, bool, "mark_inside")
     if surf is not None and len(loc) > 0:
         defaults = DEFAULTS["coreg"]
         scalars, vectors, proj_pts = _orient_glyphs(
             loc, surf, project_points, mark_inside, check_inside, nearest
         )
         if mark_inside:
-            from matplotlib.colors import ListedColormap
-
-            color = np.append(color, 1)
-            colormap = ListedColormap(
-                np.array(
-                    [
-                        (
-                            0,
-                            0,
-                            0,
-                            1,
-                        ),
-                        color,
-                    ]
-                )
-            )
+            colormap = ListedColormap([to_rgba("darkslategray"), to_rgba(color)])
             color = None
             clim = [0, 1]
         else:
diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py
index b09314462dc..5cb89179ef5 100644
--- a/mne/viz/backends/_pyvista.py
+++ b/mne/viz/backends/_pyvista.py
@@ -655,6 +655,9 @@ def quiver3d(
         clim=None,
     ):
         _check_option("mode", mode, ALLOWED_QUIVER_MODES)
+        _validate_type(scale_mode, str, "scale_mode")
+        scale_map = dict(none=False, scalar="scalars", vector="vec")
+        _check_option("scale_mode", scale_mode, list(scale_map))
         with warnings.catch_warnings():
             warnings.filterwarnings("ignore", category=FutureWarning)
             factor = scale
@@ -667,7 +670,10 @@ def quiver3d(
             grid = UnstructuredGrid(*args)
             if scalars is None:
                 scalars = np.ones((n_points,))
-            grid.point_data["scalars"] = np.array(scalars)
+                mesh_scalars = None
+            else:
+                mesh_scalars = "scalars"
+            grid.point_data["scalars"] = np.array(scalars, float)
             grid.point_data["vec"] = vectors
             if mode == "2darrow":
                 return _arrow_glyph(grid, factor), grid
@@ -715,14 +721,17 @@ def quiver3d(
                 glyph.Update()
                 geom = glyph.GetOutput()
                 mesh = grid.glyph(
-                    orient="vec", scale=scale_mode == "vector", factor=factor, geom=geom
+                    orient="vec",
+                    scale=scale_map[scale_mode],
+                    factor=factor,
+                    geom=geom,
                 )
             actor = _add_mesh(
                 self.plotter,
                 mesh=mesh,
                 color=color,
                 opacity=opacity,
-                scalars=None,
+                scalars=mesh_scalars,
                 colormap=colormap,
                 show_scalar_bar=False,
                 backface_culling=backface_culling,
diff --git a/mne/viz/evoked.py b/mne/viz/evoked.py
index 6abcbcc0d1d..1c6712a6bec 100644
--- a/mne/viz/evoked.py
+++ b/mne/viz/evoked.py
@@ -679,15 +679,17 @@ def _plot_lines(
                     _handle_spatial_colors(
                         colors, info, idx, this_type, psd, ax, sphere
                     )
+                    bad_color = (0.5, 0.5, 0.5)
                 else:
                     if isinstance(_spat_col, (tuple, str)):
                         col = [_spat_col]
                     else:
                         col = ["k"]
+                    bad_color = "r"
                     colors = col * len(idx)
-                    for i in bad_ch_idx:
-                        if i in idx:
-                            colors[idx.index(i)] = "r"
+                for i in bad_ch_idx:
+                    if i in idx:
+                        colors[idx.index(i)] = bad_color
 
                 if zorder == "std":
                     # find the channels with the least activity
diff --git a/mne/viz/tests/test_evoked.py b/mne/viz/tests/test_evoked.py
index c089b064d4a..51b83f222fa 100644
--- a/mne/viz/tests/test_evoked.py
+++ b/mne/viz/tests/test_evoked.py
@@ -16,6 +16,7 @@
 import pytest
 from matplotlib import gridspec
 from matplotlib.collections import PolyCollection
+from matplotlib.colors import same_color
 from mpl_toolkits.axes_grid1.parasite_axes import HostAxes  # spatial_colors
 from numpy.testing import assert_allclose
 
@@ -134,6 +135,12 @@ def test_plot_evoked():
     amplitudes = _get_amplitudes(fig)
     assert len(amplitudes) == len(default_picks)
     assert evoked.proj is False
+    assert evoked.info["bads"] == ["MEG 2641", "EEG 004"]
+    eeg_lines = fig.axes[2].lines
+    n_eeg = sum(ch_type == "eeg" for ch_type in evoked.get_channel_types())
+    assert len(eeg_lines) == n_eeg == 4
+    n_bad = sum(same_color(line.get_color(), "0.5") for line in eeg_lines)
+    assert n_bad == 1
     # Test a click
     ax = fig.get_axes()[0]
     line = ax.lines[0]
diff --git a/mne/viz/utils.py b/mne/viz/utils.py
index c81bdf354c2..e9c36281bae 100644
--- a/mne/viz/utils.py
+++ b/mne/viz/utils.py
@@ -2506,6 +2506,7 @@ def _plot_psd(
     if not average:
         picks = np.concatenate(picks_list)
         info = pick_info(inst.info, sel=picks, copy=True)
+        bad_ch_idx = [info["ch_names"].index(ch) for ch in info["bads"]]
         types = np.array(info.get_channel_types())
         ch_types_used = list()
         for this_type in _VALID_CHANNEL_TYPES:
@@ -2538,7 +2539,7 @@ def _plot_psd(
             xlim=(freqs[0], freqs[-1]),
             ylim=None,
             times=freqs,
-            bad_ch_idx=[],
+            bad_ch_idx=bad_ch_idx,
             titles=titles,
             ch_types_used=ch_types_used,
             selectable=True,
diff --git a/pyproject.toml b/pyproject.toml
index 0dc29069335..f28204f5bca 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -140,3 +140,17 @@ exclude = "(dist/)|(build/)|(.*\\.ipynb)"
 
 [tool.bandit.assert_used]
 skips = ["*/test_*.py"] # assert statements are good practice with pytest
+
+[tool.rstcheck]
+report_level = "WARNING"
+ignore_roles = [
+    "func", "class", "term", "ref", "doc", "gh", "file", "samp", "meth", "mod", "kbd",
+    "newcontrib", "footcite", "footcite:t", "eq", "py:mod", "attr", "py:class", "exc",
+]
+ignore_directives = [
+    "rst-class", "tab-set", "grid", "toctree", "footbibliography", "autosummary",
+    "currentmodule", "automodule", "cssclass", "tabularcolumns", "minigallery",
+    "autoclass", "highlight", "dropdown", "graphviz", "glossary", "autofunction",
+    "bibliography",
+]
+ignore_messages = "^.*(Unknown target name|Undefined substitution referenced)[^`]*$"
diff --git a/tools/azure_dependencies.sh b/tools/azure_dependencies.sh
index 072665d9c3c..d3ce1a98119 100755
--- a/tools/azure_dependencies.sh
+++ b/tools/azure_dependencies.sh
@@ -9,7 +9,8 @@ elif [ "${TEST_MODE}" == "pip-pre" ]; then
 	python -m pip install $STD_ARGS pip setuptools wheel packaging setuptools_scm
 	python -m pip install $STD_ARGS --only-binary ":all:" --extra-index-url "https://www.riverbankcomputing.com/pypi/simple" PyQt6 PyQt6-sip PyQt6-Qt6
 	echo "Numpy etc."
-	python -m pip install $STD_ARGS --only-binary ":all:" --extra-index-url "https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" "numpy>=2.0.0.dev0" "scipy>=1.12.0.dev0" statsmodels pandas scikit-learn matplotlib
+	# As of 2023/10/25 no pandas (or statsmodels) because they pin to NumPy < 2
+	python -m pip install $STD_ARGS --only-binary ":all:" --extra-index-url "https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" "numpy>=2.0.0.dev0" "scipy>=1.12.0.dev0" scikit-learn matplotlib
 	echo "dipy"
 	python -m pip install $STD_ARGS --only-binary ":all:" --extra-index-url "https://pypi.anaconda.org/scipy-wheels-nightly/simple" dipy
 	echo "h5py"
@@ -24,7 +25,7 @@ elif [ "${TEST_MODE}" == "pip-pre" ]; then
 	echo "misc"
 	python -m pip install $STD_ARGS imageio-ffmpeg xlrd mffpy python-picard pillow
 	echo "nibabel with workaround"
-	python -m pip install --progress-bar off git+https://github.com/mscheltienne/nibabel.git@np.sctypes
+	python -m pip install --progress-bar off git+https://github.com/nipy/nibabel.git
 	echo "joblib"
 	python -m pip install --progress-bar off git+https://github.com/joblib/joblib@master
 	echo "EDFlib-Python"
diff --git a/tools/github_actions_dependencies.sh b/tools/github_actions_dependencies.sh
index d08e5727e77..c5f4dd0ea7e 100755
--- a/tools/github_actions_dependencies.sh
+++ b/tools/github_actions_dependencies.sh
@@ -1,5 +1,7 @@
 #!/bin/bash -ef
 
+set -o pipefail
+
 STD_ARGS="--progress-bar off --upgrade"
 if [ ! -z "$CONDA_ENV" ]; then
 	echo "Uninstalling MNE for CONDA_ENV=${CONDA_ENV}"
@@ -18,7 +20,8 @@ else
 	echo "PyQt6"
 	pip install $STD_ARGS --only-binary ":all:" --default-timeout=60 --extra-index-url https://www.riverbankcomputing.com/pypi/simple PyQt6
 	echo "NumPy/SciPy/pandas etc."
-	pip install $STD_ARGS --only-binary ":all:" --default-timeout=60 --extra-index-url "https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" "numpy>=2.0.0.dev0" scipy scikit-learn pandas matplotlib pillow statsmodels
+	# As of 2023/10/25 no pandas (or statsmodels, nilearn) because they pin to NumPy < 2
+	pip install $STD_ARGS --only-binary ":all:" --default-timeout=60 --extra-index-url "https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" "numpy>=2.0.0.dev0" scipy scikit-learn matplotlib pillow
 	echo "dipy"
 	pip install $STD_ARGS --only-binary ":all:" --default-timeout=60 --extra-index-url "https://pypi.anaconda.org/scipy-wheels-nightly/simple" dipy
 	echo "H5py"
@@ -27,7 +30,8 @@ else
 	pip install $STD_ARGS --only-binary ":all:" --extra-index-url "https://test.pypi.org/simple" openmeeg
 	# No Numba because it forces an old NumPy version
 	echo "nilearn and openmeeg"
-	pip install $STD_ARGS git+https://github.com/nilearn/nilearn
+	# pip install $STD_ARGS git+https://github.com/nilearn/nilearn
+	pip install $STD_ARGS openmeeg
 	echo "VTK"
 	pip install $STD_ARGS --only-binary ":all:" --extra-index-url "https://wheels.vtk.org" vtk
 	python -c "import vtk"
@@ -40,14 +44,17 @@ else
 	echo "mne-qt-browser"
 	pip install $STD_ARGS git+https://github.com/mne-tools/mne-qt-browser
 	echo "nibabel with workaround"
-	pip install $STD_ARGS git+https://github.com/mscheltienne/nibabel.git@np.sctypes
+	pip install $STD_ARGS git+https://github.com/nipy/nibabel.git
 	echo "joblib"
 	pip install $STD_ARGS git+https://github.com/joblib/joblib@master
 	echo "EDFlib-Python"
 	pip install $STD_ARGS git+https://gitlab.com/Teuniz/EDFlib-Python@master
+	# Until Pandas is fixed, make sure we didn't install it
+	! python -c "import pandas"
 fi
 echo ""
 
+
 # for compat_minimal and compat_old, we don't want to --upgrade
 if [ ! -z "$CONDA_DEPENDENCIES" ]; then
 	echo "Installing dependencies for conda"
diff --git a/tutorials/intro/30_info.py b/tutorials/intro/30_info.py
index 2de72747528..2df1c17e87b 100644
--- a/tutorials/intro/30_info.py
+++ b/tutorials/intro/30_info.py
@@ -5,8 +5,8 @@
 The Info data structure
 =======================
 
-This tutorial describes the :class:`mne.Info` data structure, which keeps track
-of various recording details, and is attached to :class:`~mne.io.Raw`,
+This tutorial describes the :class:`mne.Info` data structure, which keeps track of
+various recording details, and is attached to :class:`~mne.io.Raw`,
 :class:`~mne.Epochs`, and :class:`~mne.Evoked` objects.
 
 We will begin by loading the Python modules we need, and loading the same
diff --git a/tutorials/intro/70_report.py b/tutorials/intro/70_report.py
index a7d3b02b2b3..bbbb4ab2abf 100644
--- a/tutorials/intro/70_report.py
+++ b/tutorials/intro/70_report.py
@@ -17,8 +17,7 @@
 HTML pages it generates are self-contained and do not require a running Python
 environment. However, it is less flexible as you can't change code and re-run
 something directly within the browser. This tutorial covers the basics of
-building a report. As usual, we will start by importing the modules and data we
-need:
+building a report. As usual, we will start by importing the modules and data we need:
 """
 
 # %%
@@ -267,7 +266,7 @@
 report.add_ica(
     ica=ica,
     title="ICA cleaning",
-    picks=[0, 1],  # only plot the first two components
+    picks=ica.exclude,  # plot the excluded EOG components
     inst=raw,
     eog_evoked=eog_epochs.average(),
     eog_scores=eog_scores,
diff --git a/tutorials/preprocessing/60_maxwell_filtering_sss.py b/tutorials/preprocessing/60_maxwell_filtering_sss.py
index c1453528975..f07caa46257 100644
--- a/tutorials/preprocessing/60_maxwell_filtering_sss.py
+++ b/tutorials/preprocessing/60_maxwell_filtering_sss.py
@@ -326,7 +326,7 @@
 # %%
 # Head position data can be computed using
 # :func:`mne.chpi.compute_chpi_locs` and :func:`mne.chpi.compute_head_pos`,
-# or loaded with the:func:`mne.chpi.read_head_pos` function. The
+# or loaded with the :func:`mne.chpi.read_head_pos` function. The
 # :ref:`example data <sample-dataset>` doesn't include cHPI, so here we'll
 # load a :file:`.pos` file used for testing, just to demonstrate: