diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index f0665efc164..2089f60cc99 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -80,7 +80,7 @@ stages:
jobs:
- job: Ultraslow_PG
pool:
- vmImage: 'ubuntu-20.04'
+ vmImage: 'ubuntu-22.04'
variables:
DISPLAY: ':99'
OPENBLAS_NUM_THREADS: '1'
@@ -107,7 +107,7 @@ stages:
- bash: |
set -e
python -m pip install --progress-bar off --upgrade pip setuptools wheel
- python -m pip install --progress-bar off mne-qt-browser[opengl] pyvista scikit-learn pytest-error-for-skips python-picard "PySide6!=6.3.0,!=6.4.0,!=6.4.0.1,!=6.5.0" qtpy
+ python -m pip install --progress-bar off mne-qt-browser[opengl] pyvista scikit-learn pytest-error-for-skips python-picard "PySide6!=6.5.1" qtpy
python -m pip uninstall -yq mne
python -m pip install --progress-bar off --upgrade -e .[test]
displayName: 'Install dependencies with pip'
@@ -149,7 +149,7 @@ stages:
- job: Qt
pool:
- vmImage: 'ubuntu-20.04'
+ vmImage: 'ubuntu-22.04'
variables:
DISPLAY: ':99'
OPENBLAS_NUM_THREADS: '1'
@@ -196,6 +196,8 @@ stages:
- bash: |
set -e
python -m pip install PyQt6
+ # Uncomment if "xcb not found" Qt errors/segfaults come up again
+ # LD_DEBUG=libs python -c "from PyQt6.QtWidgets import QApplication, QWidget; app = QApplication([]); import matplotlib; matplotlib.use('QtAgg'); import matplotlib.pyplot as plt; plt.figure()"
mne sys_info -pd
mne sys_info -pd | grep "qtpy .* (PyQt6=.*)$"
pytest -m "not slowtest" ${TEST_OPTIONS}
diff --git a/doc/_includes/institutional-partners.rst b/doc/_includes/institutional-partners.rst
index 61cd2ae9a4e..46fcc1ede4e 100644
--- a/doc/_includes/institutional-partners.rst
+++ b/doc/_includes/institutional-partners.rst
@@ -19,6 +19,7 @@ Current partners
- `Children’s Hospital of Philadelphia Research Institute `_
- `Donders Institute for Brain, Cognition and Behaviour at Radboud University `_
- `Harvard Medical School `_
+- `Human Neuroscience Platform at Fondation Campus Biotech Geneva `_
- `Institut national de recherche en informatique et en automatique `_
- `Karl-Franzens-Universität Graz `_
- `Massachusetts General Hospital `_
diff --git a/doc/_static/institution_logos/FCBG.svg b/doc/_static/institution_logos/FCBG.svg
new file mode 100644
index 00000000000..c2c930fe5af
--- /dev/null
+++ b/doc/_static/institution_logos/FCBG.svg
@@ -0,0 +1,82 @@
+
+
+
+
diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc
index ca78884137a..a73a5b20f40 100644
--- a/doc/changes/latest.inc
+++ b/doc/changes/latest.inc
@@ -23,13 +23,24 @@ Current (1.5.dev0)
Enhancements
~~~~~~~~~~~~
-- None yet
+- Add ``cmap`` argument for the :func:`mne.viz.plot_sensors` (:gh:`11720` by :newcontrib:`Gennadiy Belonosov`)
+- When failing to locate a file, we now print the full path in quotation marks to help spot accidentally added trailing spaces (:gh:`11718` by `Richard Höchenberger`_)
+- Add standard montage lookup table for ``easycap-M43`` (:gh:`11744` by :newcontrib:`Diptyajit Das`)
+- Added :class:`mne.preprocessing.eyetracking.Calibration` to store eye-tracking calibration info, and :func:`mne.preprocessing.eyetracking.read_eyelink_calibration` to read calibration data from EyeLink systems (:gh:`11719` by `Scott Huberty`_)
Bugs
~~~~
+- Fix bug that required curv.*h files to create Brain object (:gh:`11704` by :newcontrib:`Aaron Earle-Richardson`)
+- Extended test to highlight bug in :func:`mne.stats.permutation_t_test` (:gh:`11575` by :newcontrib:`Joshua Calder-Travis`)
+- Fix bug where :meth:`mne.viz.Brain.add_volume_labels` used an incorrect orientation (:gh:`11730` by `Alex Rockhill`_)
- Fix bug with :func:`mne.forward.restrict_forward_to_label` where cortical patch information was not adjusted (:gh:`11694` by `Eric Larson`_)
-- Fix hanging interpreter with matplotlib figures using ``mne/viz/_mpl_figure.py`` in spyder console and jupyter notebooks`(:gh:`11696` by `Mathieu Scheltienne`_)
+- Fix bug with PySide6 compatibility (:gh:`11721` by `Eric Larson`_)
+- Fix hanging interpreter with matplotlib figures using ``mne/viz/_mpl_figure.py`` in spyder console and jupyter notebooks (:gh:`11696` by `Mathieu Scheltienne`_)
+- Fix bug with overlapping text for :meth:`mne.Evoked.plot` (:gh:`11698` by `Alex Rockhill`_)
+- For :func:`mne.io.read_raw_eyelink`, the default value of the ``gap_description`` parameter is now ``'BAD_ACQ_SKIP'``, following MNE convention (:gh:`11719` by `Scott Huberty`_)
+- Allow int-like for the argument ``id`` of `~mne.make_fixed_length_events` (:gh:`11748` by `Mathieu Scheltienne`_)
API changes
~~~~~~~~~~~
-- None yet
+- The ``baseline`` argument can now be array-like (e.g. ``list``, ``tuple``, ``np.ndarray``, ...) instead of only a ``tuple`` (:gh:`11713` by `Clemens Brunner`_)
+- Deprecated ``gap_description`` keyword argument of :func:`mne.io.read_raw_eyelink`, which will be removed in mne version 1.6, in favor of using :meth:`mne.Annotations.rename` (:gh:`11719` by `Scott Huberty`_)
diff --git a/doc/changes/names.inc b/doc/changes/names.inc
index 48f1ecd67a1..357f929309e 100644
--- a/doc/changes/names.inc
+++ b/doc/changes/names.inc
@@ -1,3 +1,5 @@
+.. _Aaron Earle-Richardson: https://github.com/Aaronearlerichardson
+
.. _Abram Hindle: https://softwareprocess.es
.. _Adam Li: https://github.com/adam2392
@@ -118,6 +120,8 @@
.. _Dinara Issagaliyeva: https://github.com/dissagaliyeva
+.. _Diptyajit Das: https://github.com/dasdiptyajit
+
.. _Dirk Gütlin: https://github.com/DiGyt
.. _Dmitrii Altukhov: https://github.com/dmalt
@@ -170,6 +174,8 @@
.. _Fu-Te Wong: https://github.com/zuxfoucault
+.. _Gennadiy Belonosov: https://github.com/Genuster
+
.. _Geoff Brookshire: https://github.com/gbrookshire
.. _George O'Neill: https://georgeoneill.github.io
@@ -256,6 +262,8 @@
.. _Joshua Bear: https://github.com/joshbear
+.. _Joshua Calder-Travis: https://github.com/jCalderTravis
+
.. _Joshua Teves: https://github.com/jbteves
.. _Judy D Zhu: https://github.com/JD-Zhu
diff --git a/doc/conf.py b/doc/conf.py
index e6c7b55de81..b30d777292f 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -398,6 +398,7 @@
r"\.fromkeys",
r"\.items",
r"\.keys",
+ r"\.move_to_end",
r"\.pop",
r"\.popitem",
r"\.setdefault",
@@ -735,6 +736,7 @@ def append_attr_meth_examples(app, what, name, obj, options, lines):
("py:class", "(k, v), remove and return some (key, value) pair as a"),
("py:class", "_FuncT"), # type hint used in @verbose decorator
("py:class", "mne.utils._logging._FuncT"),
+ ("py:class", "None. Remove all items from od."),
]
nitpick_ignore_regex = [
("py:.*", r"mne\.io\.BaseRaw.*"),
@@ -1125,6 +1127,12 @@ def append_attr_meth_examples(app, what, name, obj, options, lines):
url="https://www.ru.nl/donders/",
size=xl,
),
+ dict(
+ name="Human Neuroscience Platform at Fondation Campus Biotech Geneva", # noqa: E501
+ img="FCBG.svg",
+ url="https://hnp.fcbg.ch/",
+ size=sm,
+ ),
],
# \u00AD is an optional hyphen (not rendered unless needed)
# If these are changed, the Makefile should be updated, too
diff --git a/doc/file_io.rst b/doc/file_io.rst
index 4ddcf7d1d01..c7957fb8468 100644
--- a/doc/file_io.rst
+++ b/doc/file_io.rst
@@ -61,4 +61,4 @@ Base class:
:toctree: generated
:template: autosummary/class_no_members.rst
- BaseEpochs
+ BaseEpochs
\ No newline at end of file
diff --git a/doc/install/installers.rst b/doc/install/installers.rst
index b17b1b5459c..a02b86f5b3b 100644
--- a/doc/install/installers.rst
+++ b/doc/install/installers.rst
@@ -17,7 +17,7 @@ Got any questions? Let us know on the `MNE Forum`_!
:class-content: text-center
:name: linux-installers
- .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.4.0/MNE-Python-1.4.0_1-Linux.sh
+ .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.4.2/MNE-Python-1.4.2_0-Linux.sh
:ref-type: ref
:color: primary
:shadow:
@@ -31,14 +31,14 @@ Got any questions? Let us know on the `MNE Forum`_!
.. code-block:: console
- $ sh ./MNE-Python-1.4.0_1-Linux.sh
+ $ sh ./MNE-Python-1.4.2_0-Linux.sh
.. tab-item:: macOS (Intel)
:class-content: text-center
:name: macos-intel-installers
- .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.4.0/MNE-Python-1.4.0_1-macOS_Intel.pkg
+ .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.4.2/MNE-Python-1.4.2_0-macOS_Intel.pkg
:ref-type: ref
:color: primary
:shadow:
@@ -54,7 +54,7 @@ Got any questions? Let us know on the `MNE Forum`_!
:class-content: text-center
:name: macos-apple-installers
- .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.4.0/MNE-Python-1.4.0_1-macOS_M1.pkg
+ .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.4.2/MNE-Python-1.4.2_0-macOS_M1.pkg
:ref-type: ref
:color: primary
:shadow:
@@ -70,7 +70,7 @@ Got any questions? Let us know on the `MNE Forum`_!
:class-content: text-center
:name: windows-installers
- .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.4.0/MNE-Python-1.4.0_1-Windows.exe
+ .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.4.2/MNE-Python-1.4.2_0-Windows.exe
:ref-type: ref
:color: primary
:shadow:
@@ -118,7 +118,7 @@ information, including a line that will read something like:
.. code-block::
- Using Python: /some/directory/mne-python_1.4.0_1/bin/python
+ Using Python: /some/directory/mne-python_1.4.2_0/bin/python
This path is what you need to enter in VS Code when selecting the Python
interpreter.
diff --git a/doc/overview/people.rst b/doc/overview/people.rst
index 18404e0e56e..01213dfbd1b 100644
--- a/doc/overview/people.rst
+++ b/doc/overview/people.rst
@@ -28,6 +28,7 @@ Steering Council
* `Luke Bloy`_
* `Mainak Jas`_
* `Marijn van Vliet`_
+* `Mathieu Scheltienne`_
* `Mikołaj Magnuski`_
* `Richard Höchenberger`_
* `Robert Luke`_
diff --git a/doc/overview/roadmap.rst b/doc/overview/roadmap.rst
index 26a301383ce..103234a5742 100644
--- a/doc/overview/roadmap.rst
+++ b/doc/overview/roadmap.rst
@@ -14,21 +14,6 @@ Code projects, while others require more extensive work.
Open
----
-.. _time-frequency-viz:
-
-Time-frequency visualization
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-We should implement a viewer for interactive visualization of volumetric
-source-time-frequency (5-D) maps on MRI slices (orthogonal 2D viewer).
-`NutmegTrip `__
-(written by Sarang Dalal) provides similar functionality in Matlab in
-conjunction with FieldTrip. Example of NutmegTrip's source-time-frequency mode
-in action (click for link to YouTube):
-
-.. image:: https://i.ytimg.com/vi/xKdjZZphdNc/maxresdefault.jpg
- :target: https://www.youtube.com/watch?v=xKdjZZphdNc
- :width: 50%
-
Clustering statistics API
^^^^^^^^^^^^^^^^^^^^^^^^^
The current clustering statistics code has limited functionality. It should be
@@ -73,30 +58,15 @@ as well as:
- `BNCI Horizon `__
BCI datasets.
-Integrate OpenMEEG via improved Python bindings
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-`OpenMEEG `__ is a state-of-the art solver for
-forward modeling in the field of brain imaging with MEG/EEG. It solves
-numerically partial differential equations (PDE). It is written in C++ with
-Python bindings written in `SWIG `__.
-The ambition of the project is to integrate OpenMEEG into MNE offering to MNE
-the ability to solve more forward problems (cortical mapping, intracranial
-recordings, etc.). Some software tasks that shall be completed:
-
-- Cleanup Python bindings (remove useless functions, check memory managements,
- etc.)
-- Write example scripts for OpenMEEG that automatically generate web pages as
- for `MNE `__
-- Understand how MNE encodes info about sensors (location, orientation,
- integration points etc.) and allow OpenMEEG to be used.
-- Help package OpenMEEG for Debian/Ubuntu
-- Help manage `the continuous integration system
- `__
-
-
In progress
-----------
+Eye-tracking support
+^^^^^^^^^^^^^^^^^^^^
+We had a GSoC student funded to improve support for eye-tracking data, see
+`the GSoC proposal `__
+for details.
+
Diversity, Equity, and Inclusion (DEI)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
MNE-Python is committed to recruiting and retaining a diverse pool of
@@ -187,6 +157,41 @@ Our documentation has many minor issues, which can be found under the tag
Completed
---------
+Integrate OpenMEEG via improved Python bindings
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+`OpenMEEG `__ is a state-of-the art solver for
+forward modeling in the field of brain imaging with MEG/EEG. It solves
+numerically partial differential equations (PDE). It is written in C++ with
+Python bindings written in `SWIG `__.
+The ambition of the project is to integrate OpenMEEG into MNE offering to MNE
+the ability to solve more forward problems (cortical mapping, intracranial
+recordings, etc.). Tasks that have been completed:
+
+- Cleanup Python bindings (remove useless functions, check memory managements,
+ etc.)
+- Understand how MNE encodes info about sensors (location, orientation,
+ integration points etc.) and allow OpenMEEG to be used.
+- Modernize CI systems (e.g., using ``cibuildwheel``).
+
+See `OpenMEEG`_ for details.
+
+.. _time-frequency-viz:
+
+Time-frequency visualization
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+We implemented a viewer for interactive visualization of volumetric
+source-time-frequency (5-D) maps on MRI slices (orthogonal 2D viewer).
+`NutmegTrip `__
+(written by Sarang Dalal) provides similar functionality in MATLAB in
+conjunction with FieldTrip. Example of NutmegTrip's source-time-frequency mode
+in action (click for link to YouTube):
+
+.. image:: https://i.ytimg.com/vi/xKdjZZphdNc/maxresdefault.jpg
+ :target: https://www.youtube.com/watch?v=xKdjZZphdNc
+ :width: 50%
+
+See :func:`mne-gui-addons:mne_gui_addons.view_vol_stc`.
+
Distributed computing support
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
`MNE-BIDS-Pipeline`_ has been enhanced with support for cloud computing
diff --git a/doc/preprocessing.rst b/doc/preprocessing.rst
index 0ed960be4b9..7028e7ab307 100644
--- a/doc/preprocessing.rst
+++ b/doc/preprocessing.rst
@@ -153,6 +153,8 @@ Projections:
.. autosummary::
:toctree: generated/
+ Calibration
+ read_eyelink_calibration
set_channel_types_eyetrack
EEG referencing:
diff --git a/examples/time_frequency/time_frequency_simulated.py b/examples/time_frequency/time_frequency_simulated.py
index bf8b1dba6ca..46747b6ae69 100644
--- a/examples/time_frequency/time_frequency_simulated.py
+++ b/examples/time_frequency/time_frequency_simulated.py
@@ -57,9 +57,9 @@
# Add a 50 Hz sinusoidal burst to the noise and ramp it.
t = np.arange(n_times, dtype=np.float64) / sfreq
signal = np.sin(np.pi * 2.0 * 50.0 * t) # 50 Hz sinusoid signal
-signal[np.logical_or(t < 0.45, t > 0.55)] = 0.0 # Hard windowing
+signal[np.logical_or(t < 0.45, t > 0.55)] = 0.0 # hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
-signal[on_time] *= np.hanning(on_time.sum()) # Ramping
+signal[on_time] *= np.hanning(on_time.sum()) # ramping
data[:, 100:-100] += np.tile(signal, n_epochs) # add signal
raw = RawArray(data, info)
diff --git a/mne/baseline.py b/mne/baseline.py
index 21aebdde807..e4c43317ee0 100644
--- a/mne/baseline.py
+++ b/mne/baseline.py
@@ -6,7 +6,7 @@
import numpy as np
-from .utils import logger, verbose, _check_option
+from .utils import logger, verbose, _check_option, _validate_type
def _log_rescale(baseline, mode="mean"):
@@ -143,14 +143,14 @@ def fun(d, m):
def _check_baseline(baseline, times, sfreq, on_baseline_outside_data="raise"):
- """Check if the baseline is valid, and adjust it if requested.
+ """Check if the baseline is valid and adjust it if requested.
- ``None`` values inside the baseline parameter will be replaced with
- ``times[0]`` and ``times[-1]``.
+ ``None`` values inside ``baseline`` will be replaced with ``times[0]`` and
+ ``times[-1]``.
Parameters
----------
- baseline : tuple | None
+ baseline : array-like, shape (2,) | None
Beginning and end of the baseline period, in seconds. If ``None``,
assume no baseline and return immediately.
times : array
@@ -158,27 +158,27 @@ def _check_baseline(baseline, times, sfreq, on_baseline_outside_data="raise"):
sfreq : float
The sampling rate.
on_baseline_outside_data : 'raise' | 'info' | 'adjust'
- What do do if the baseline period exceeds the data.
+ What to do if the baseline period exceeds the data.
If ``'raise'``, raise an exception (default).
If ``'info'``, log an info message.
- If ``'adjust'``, adjust the baseline such that it's within the data
- range again.
+ If ``'adjust'``, adjust the baseline such that it is within the data range.
Returns
-------
(baseline_tmin, baseline_tmax) | None
- The baseline with ``None`` values replaced with times, and with
- adjusted times if ``on_baseline_outside_data='adjust'``; or ``None``
- if the ``baseline`` parameter is ``None``.
-
+ The baseline with ``None`` values replaced with times, and with adjusted times
+ if ``on_baseline_outside_data='adjust'``; or ``None``, if ``baseline`` is
+ ``None``.
"""
if baseline is None:
return None
- if not isinstance(baseline, tuple) or len(baseline) != 2:
+ _validate_type(baseline, "array-like")
+ baseline = tuple(baseline)
+
+ if len(baseline) != 2:
raise ValueError(
- f"`baseline={baseline}` is an invalid argument, must "
- f"be a tuple of length 2 or None"
+ f"baseline must have exactly two elements (got {len(baseline)})."
)
tmin, tmax = times[0], times[-1]
diff --git a/mne/channels/_standard_montage_utils.py b/mne/channels/_standard_montage_utils.py
index c136b107924..fe3c0e0e975 100644
--- a/mne/channels/_standard_montage_utils.py
+++ b/mne/channels/_standard_montage_utils.py
@@ -124,6 +124,7 @@ def _mgh_or_standard(basename, head_size, coord_frame="unknown"):
"EGI_256": _egi_256,
"easycap-M1": partial(_easycap, basename="easycap-M1.txt"),
"easycap-M10": partial(_easycap, basename="easycap-M10.txt"),
+ "easycap-M43": partial(_easycap, basename="easycap-M43.txt"),
"GSN-HydroCel-128": partial(_hydrocel, basename="GSN-HydroCel-128.sfp"),
"GSN-HydroCel-129": partial(_hydrocel, basename="GSN-HydroCel-129.sfp"),
"GSN-HydroCel-256": partial(_hydrocel, basename="GSN-HydroCel-256.sfp"),
diff --git a/mne/channels/channels.py b/mne/channels/channels.py
index f599313304c..7b70daadf34 100644
--- a/mne/channels/channels.py
+++ b/mne/channels/channels.py
@@ -385,7 +385,7 @@ def set_channel_types(self, mapping, *, on_unit_change="warn", verbose=None):
mapping : dict
A dictionary mapping channel names to sensor types, e.g.,
``{'EEG061': 'eog'}``.
- on_unit_change : 'raise' | 'warn' | 'ignore'
+ on_unit_change : ``'raise'`` | ``'warn'`` | ``'ignore'``
What to do if the measurement unit of a channel is changed
automatically to match the new sensor type.
@@ -476,7 +476,7 @@ def set_channel_types(self, mapping, *, on_unit_change="warn", verbose=None):
return self
@verbose
- def rename_channels(self, mapping, allow_duplicates=False, verbose=None):
+ def rename_channels(self, mapping, allow_duplicates=False, *, verbose=None):
"""Rename channels.
Parameters
@@ -527,6 +527,7 @@ def plot_sensors(
block=False,
show=True,
sphere=None,
+ *,
verbose=None,
):
"""Plot sensor positions.
@@ -541,9 +542,9 @@ def plot_sensors(
control key. The selected channels are returned along with the
figure instance. Defaults to 'topomap'.
ch_type : None | str
- The channel type to plot. Available options 'mag', 'grad', 'eeg',
- 'seeg', 'dbs', 'ecog', 'all'. If ``'all'``, all the available mag,
- grad, eeg, seeg, dbs, and ecog channels are plotted. If
+ The channel type to plot. Available options ``'mag'``, ``'grad'``,
+ ``'eeg'``, ``'seeg'``, ``'dbs'``, ``'ecog'``, ``'all'``. If ``'all'``, all
+ the available mag, grad, eeg, seeg, dbs, and ecog channels are plotted. If
None (default), then channels are chosen in the order given above.
title : str | None
Title for the figure. If None (default), equals to ``'Sensor
@@ -1244,7 +1245,7 @@ def interpolate_bads(
@verbose
-def rename_channels(info, mapping, allow_duplicates=False, verbose=None):
+def rename_channels(info, mapping, allow_duplicates=False, *, verbose=None):
"""Rename channels.
Parameters
@@ -1896,7 +1897,7 @@ def _compute_ch_adjacency(info, ch_type):
%(info_not_none)s
ch_type : str
The channel type for computing the adjacency matrix. Currently
- supports 'mag', 'grad' and 'eeg'.
+ supports ``'mag'``, ``'grad'`` and ``'eeg'``.
Returns
-------
diff --git a/mne/channels/data/montages/easycap-M43.txt b/mne/channels/data/montages/easycap-M43.txt
new file mode 100644
index 00000000000..47bbad785ec
--- /dev/null
+++ b/mne/channels/data/montages/easycap-M43.txt
@@ -0,0 +1,65 @@
+Site Theta Phi
+1 23 90
+2 23 30
+3 23 -30
+4 23 -90
+5 -23 30
+6 -23 -30
+7 46 74
+8 46 41
+9 46 8
+10 46 -25
+11 46 -57
+12 46 -90
+13 -46 57
+14 -46 25
+15 -46 -8
+16 -46 -41
+17 -46 -74
+18 69 76
+19 69 49
+20 69 21
+21 69 -7
+22 69 -35
+23 69 -62
+24 69 -90
+25 -69 62
+26 -69 35
+27 -69 7
+28 -69 -21
+29 -69 -49
+30 -69 -76
+31 92 90
+32 92 62
+33 92 34
+34 92 6
+35 92 -21
+36 92 -49
+37 92 -76
+38 -92 76
+39 -92 49
+40 -92 21
+41 -92 -6
+42 -92 -34
+43 -92 -62
+44 115 35
+45 115 10
+46 115 -15
+47 115 -40
+48 115 -65
+49 115 -90
+50 -115 65
+51 -115 40
+52 -115 15
+53 -115 -10
+54 -115 -35
+55 138 23
+56 138 -15
+57 138 -40
+58 138 -65
+59 138 -90
+60 -138 65
+61 -138 40
+62 -138 15
+63 -138 -23
+Ref 0 0
diff --git a/mne/channels/montage.py b/mne/channels/montage.py
index 11d48a099c8..13e064ead4a 100644
--- a/mne/channels/montage.py
+++ b/mne/channels/montage.py
@@ -139,6 +139,10 @@ class _BuiltinStandardMontage:
name="easycap-M10",
description="EasyCap with numbered electrodes (61 locations)",
),
+ _BuiltinStandardMontage(
+ name="easycap-M43",
+ description="EasyCap with numbered electrodes (64 locations)",
+ ),
_BuiltinStandardMontage(
name="EGI_256",
description="Geodesic Sensor Net (256 locations)",
diff --git a/mne/channels/tests/test_montage.py b/mne/channels/tests/test_montage.py
index ca7347b21ad..32d41f56fde 100644
--- a/mne/channels/tests/test_montage.py
+++ b/mne/channels/tests/test_montage.py
@@ -1915,7 +1915,7 @@ def test_read_dig_hpts():
def test_get_builtin_montages():
"""Test help function to obtain builtin montages."""
- EXPECTED_COUNT = 27
+ EXPECTED_COUNT = 28
montages = get_builtin_montages()
assert len(montages) == EXPECTED_COUNT
diff --git a/mne/chpi.py b/mne/chpi.py
index 3bbddb1647b..4dce5b6b413 100644
--- a/mne/chpi.py
+++ b/mne/chpi.py
@@ -1607,7 +1607,7 @@ def get_active_chpi(raw, *, on_missing="raise", verbose=None):
)
# extract hpi info
chpi_info = get_chpi_info(raw.info, on_missing=on_missing)
- if len(chpi_info[2]) == 0:
+ if (len(chpi_info[2]) == 0) or (chpi_info[1] is None):
return np.zeros_like(raw.times)
# extract hpi time series and infer which one was on
diff --git a/mne/conftest.py b/mne/conftest.py
index c99d2eeebe1..616356396ce 100644
--- a/mne/conftest.py
+++ b/mne/conftest.py
@@ -154,6 +154,8 @@ def pytest_configure(config):
ignore:pkg_resources is deprecated as an API.*:DeprecationWarning
# h5py
ignore:`product` is deprecated as of NumPy.*:DeprecationWarning
+ # pandas
+ ignore:.*np\.find_common_type is deprecated.*:DeprecationWarning
""" # noqa: E501
for warning_line in warning_lines.split("\n"):
warning_line = warning_line.strip()
diff --git a/mne/event.py b/mne/event.py
index 63cb994db8a..5a6031f1d0f 100644
--- a/mne/event.py
+++ b/mne/event.py
@@ -24,6 +24,7 @@
_check_fname,
_on_missing,
_check_on_missing,
+ _check_integer_or_list,
)
from .io.constants import FIFF
from .io.tree import dir_tree_find
@@ -59,8 +60,7 @@ def pick_events(events, include=None, exclude=None, step=False):
The list of events.
"""
if include is not None:
- if not isinstance(include, list):
- include = [include]
+ include = _check_integer_or_list(include, "include")
mask = np.zeros(len(events), dtype=bool)
for e in include:
mask = np.logical_or(mask, events[:, 2] == e)
@@ -68,8 +68,7 @@ def pick_events(events, include=None, exclude=None, step=False):
mask = np.logical_or(mask, events[:, 1] == e)
events = events[mask]
elif exclude is not None:
- if not isinstance(exclude, list):
- exclude = [exclude]
+ exclude = _check_integer_or_list(exclude, "exclude")
mask = np.ones(len(events), dtype=bool)
for e in exclude:
mask = np.logical_and(mask, events[:, 2] != e)
@@ -958,7 +957,7 @@ def make_fixed_length_events(
from .io.base import BaseRaw
_validate_type(raw, BaseRaw, "raw")
- _validate_type(id, int, "id")
+ _validate_type(id, "int", "id")
_validate_type(duration, "numeric", "duration")
_validate_type(overlap, "numeric", "overlap")
duration, overlap = float(duration), float(overlap)
diff --git a/mne/io/eyelink/__init__.py b/mne/io/eyelink/__init__.py
index 77ee7ebc9ef..e8f09e1aee5 100644
--- a/mne/io/eyelink/__init__.py
+++ b/mne/io/eyelink/__init__.py
@@ -1,6 +1,7 @@
"""Module for loading Eye-Tracker data."""
-# Author: Dominik Welke
+# Authors: Dominik Welke
+# Scott Huberty
#
# License: BSD-3-Clause
diff --git a/mne/io/eyelink/_utils.py b/mne/io/eyelink/_utils.py
new file mode 100644
index 00000000000..3e6cf76e2fe
--- /dev/null
+++ b/mne/io/eyelink/_utils.py
@@ -0,0 +1,113 @@
+"""Helper functions for reading eyelink ASCII files."""
+# Authors: Scott Huberty
+# License: BSD-3-Clause
+
+import re
+import numpy as np
+
+
+def _find_recording_start(lines):
+ """Return the first START line in an SR Research EyeLink ASCII file.
+
+ Parameters
+ ----------
+ lines: A list of strings, which are the lines in an eyelink ASCII file.
+
+ Returns
+ -------
+ The line that contains the info on the start of the recording.
+ """
+ for line in lines:
+ if line.startswith("START"):
+ return line
+ raise ValueError("Could not find the start of the recording.")
+
+
+def _parse_validation_line(line):
+ """Parse a single line of eyelink validation data.
+
+ Parameters
+ ----------
+ line: A string containing a line of validation data from an eyelink
+ ASCII file.
+
+ Returns
+ -------
+ A list of tuples containing the validation data.
+ """
+ tokens = line.split()
+ xy = tokens[-6].strip("[]").split(",") # e.g. '960, 540'
+ xy_diff = tokens[-2].strip("[]").split(",") # e.g. '-1.5, -2.8'
+ vals = [float(v) for v in [*xy, tokens[-4], *xy_diff]]
+ vals[3] += vals[0] # pos_x + eye_x i.e. 960 + -1.5
+ vals[4] += vals[1] # pos_y + eye_y
+
+ return tuple(vals)
+
+
+def _parse_calibration(
+ lines, screen_size=None, screen_distance=None, screen_resolution=None
+):
+ """Parse the lines in the given list and returns a list of Calibration instances.
+
+ Parameters
+ ----------
+ lines: A list of strings, which are the lines in an eyelink ASCII file.
+
+ Returns
+ -------
+ A list containing one or more Calibration instances,
+ one for each calibration that was recorded in the eyelink ASCII file
+ data.
+ """
+ from ...preprocessing.eyetracking.calibration import Calibration
+
+ regex = re.compile(r"\d+") # for finding numeric characters
+ calibrations = list()
+ rec_start = float(_find_recording_start(lines).split()[1])
+
+ for line_number, line in enumerate(lines):
+ if (
+ "!CAL VALIDATION " in line and "ABORTED" not in line
+ ): # Start of a calibration
+ tokens = line.split()
+ model = tokens[4] # e.g. 'HV13'
+ this_eye = tokens[6].lower() # e.g. 'left'
+ timestamp = float(tokens[1])
+ onset = (timestamp - rec_start) / 1000.0 # in seconds
+ avg_error = float(line.split("avg.")[0].split()[-1]) # e.g. 0.3
+ max_error = float(line.split("max")[0].split()[-1]) # e.g. 0.9
+
+ n_points = int(regex.search(model).group()) # e.g. 13
+ n_points *= 2 if "LR" in line else 1 # one point per eye if "LR"
+ # The next n_point lines contain the validation data
+ points = []
+ for validation_index in range(n_points):
+ subline = lines[line_number + validation_index + 1]
+ if "!CAL VALIDATION" in subline:
+ continue # for bino mode, skip the second eye's validation summary
+ subline_eye = subline.split("at")[0].split()[-1].lower() # e.g. 'left'
+ if subline_eye != this_eye:
+ continue # skip the validation lines for the other eye
+ point_info = _parse_validation_line(subline)
+ points.append(point_info)
+ # Convert the list of validation data into a numpy array
+ positions = np.array([point[:2] for point in points])
+ offsets = np.array([point[2] for point in points])
+ gaze = np.array([point[3:] for point in points])
+ # create the Calibration instance
+ calibration = Calibration(
+ onset=max(0.0, onset), # 0 if calibrated before recording
+ model=model,
+ eye=this_eye,
+ avg_error=avg_error,
+ max_error=max_error,
+ positions=positions,
+ offsets=offsets,
+ gaze=gaze,
+ screen_size=screen_size,
+ screen_distance=screen_distance,
+ screen_resolution=screen_resolution,
+ )
+ calibrations.append(calibration)
+ return calibrations
diff --git a/mne/io/eyelink/eyelink.py b/mne/io/eyelink/eyelink.py
index e01f46a30b7..5321ddc136d 100644
--- a/mne/io/eyelink/eyelink.py
+++ b/mne/io/eyelink/eyelink.py
@@ -1,3 +1,5 @@
+"""SR Research Eyelink Load Function."""
+
# Authors: Dominik Welke
# Scott Huberty
# Christian O'Reilly
@@ -12,7 +14,7 @@
from ..base import BaseRaw
from ..meas_info import create_info
from ...annotations import Annotations
-from ...utils import logger, verbose, fill_doc, _check_pandas_installed
+from ...utils import _check_fname, _check_pandas_installed, fill_doc, logger, verbose
EYELINK_COLS = {
"timestamp": ("time",),
@@ -293,13 +295,13 @@ def read_raw_eyelink(
apply_offsets=False,
find_overlaps=False,
overlap_threshold=0.05,
- gap_description="bad_rec_gap",
+ gap_description=None,
):
"""Reader for an Eyelink .asc file.
Parameters
----------
- fname : str
+ fname : path-like
Path to the eyelink file (.asc).
%(preload)s
%(verbose)s
@@ -318,15 +320,20 @@ def read_raw_eyelink(
saccades) if their start times and their stop times are both not
separated by more than overlap_threshold.
overlap_threshold : float (default 0.05)
- Time in seconds. Threshold of allowable time-gap between the start and
- stop times of the left and right eyes. If gap is larger than threshold,
- the :class:`mne.Annotations` will be kept separate (i.e. "blink_L",
- "blink_R"). If the gap is smaller than the threshold, the
- :class:`mne.Annotations` will be merged (i.e. "blink_both").
- gap_description : str (default 'bad_rec_gap')
- If there are multiple recording blocks in the file, the description of
+ Time in seconds. Threshold of allowable time-gap between both the start and
+ stop times of the left and right eyes. If the gap is larger than the threshold,
+ the :class:`mne.Annotations` will be kept separate (i.e. ``"blink_L"``,
+ ``"blink_R"``). If the gap is smaller than the threshold, the
+ :class:`mne.Annotations` will be merged and labeled as ``"blink_both"``.
+ Defaults to ``0.05`` seconds (50 ms), meaning that if the blink start times of
+ the left and right eyes are separated by less than 50 ms, and the blink stop
+ times of the left and right eyes are separated by less than 50 ms, then the
+ blink will be merged into a single :class:`mne.Annotations`.
+ gap_description : str (default 'BAD_ACQ_SKIP')
+ This parameter is deprecated and will be removed in 1.6.
+ Use :meth:`mne.Annotations.rename` instead. This is the description of
the annotation that will span across the gap period between the
- blocks. Uses 'bad_rec_gap' by default so that these time periods will
+ blocks. Uses ``'BAD_ACQ_SKIP'`` by default so that these time periods will
be considered bad by MNE and excluded from operations like epoching.
Returns
@@ -337,17 +344,26 @@ def read_raw_eyelink(
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
+
+ Notes
+ -----
+ It is common for SR Research Eyelink eye trackers to only record data during trials.
+ To avoid frequent data discontinuities and to ensure that the data is continuous
+ so that it can be aligned with EEG and MEG data (if applicable), this reader will
+ preserve the times between recording trials and annotate them with
+ ``'BAD_ACQ_SKIP'``.
"""
- extension = Path(fname).suffix
+ fname = _check_fname(fname, overwrite="read", must_exist=True, name="fname")
+ extension = fname.suffix
if extension not in ".asc":
raise ValueError(
"This reader can only read eyelink .asc files."
- f" Got extension {extension} instead. consult eyelink"
- " manual for converting eyelink data format (.edf)"
+ f" Got extension {extension} instead. Consult EyeLink"
+ " manual for converting EyeLink data format (.edf)"
" files to .asc format."
)
- return RawEyelink(
+ raw_eyelink = RawEyelink(
fname,
preload=preload,
verbose=verbose,
@@ -357,6 +373,7 @@ def read_raw_eyelink(
overlap_threshold=overlap_threshold,
gap_desc=gap_description,
)
+ return raw_eyelink
@fill_doc
@@ -365,7 +382,7 @@ class RawEyelink(BaseRaw):
Parameters
----------
- fname : str
+ fname : path-like
Path to the data file (.XXX).
create_annotations : bool | list (default True)
Whether to create mne.Annotations from occular events
@@ -387,11 +404,15 @@ class RawEyelink(BaseRaw):
the :class:`mne.Annotations` will be kept separate (i.e. "blink_L",
"blink_R"). If the gap is smaller than the threshold, the
:class:`mne.Annotations` will be merged (i.e. "blink_both").
- gap_desc : str (default 'bad_rec_gap')
+ gap_desc : str
If there are multiple recording blocks in the file, the description of
the annotation that will span across the gap period between the
- blocks. Uses 'bad_rec_gap' by default so that these time periods will
- be considered bad by MNE and excluded from operations like epoching.
+ blocks. Default is ``None``, which uses 'BAD_ACQ_SKIP' by default so that these
+ time periods will be considered bad by MNE and excluded from operations like
+ epoching. Note that this parameter is deprecated and will be removed in 1.6.
+ Use :meth:`mne.Annotations.rename` instead.
+
+
%(preload)s
%(verbose)s
@@ -402,23 +423,6 @@ class RawEyelink(BaseRaw):
dataframes : dict
Dictionary of pandas DataFrames. One for eyetracking samples,
and one for each type of eyelink event (blinks, messages, etc)
- _sample_lines : list
- List of lists, each list is one sample containing eyetracking
- X/Y and pupil channel data (+ other channels, if they exist)
- _event_lines : dict
- Each key contains a list of lists, for an event-type that occurred
- during the recording period. Events can vary, from occular events
- (blinks, saccades, fixations), to messages from the stimulus
- presentation software, or info from a response controller.
- _system_lines : list
- List of tab delimited strings. Each string is a system message,
- that in most cases aren't needed. System messages occur for
- Eyelinks DataViewer application.
- _tracking_mode : str
- Whether whether a single eye was tracked ('monocular'), or both
- ('binocular').
- _gap_desc : str
- The description to be used for annotations returned by _make_gap_annots
See Also
--------
@@ -435,17 +439,26 @@ def __init__(
apply_offsets=False,
find_overlaps=False,
overlap_threshold=0.05,
- gap_desc="bad_rec_gap",
+ gap_desc=None,
):
logger.info("Loading {}".format(fname))
self.fname = Path(fname)
- self._sample_lines = None
- self._event_lines = None
- self._system_lines = None
+ self._sample_lines = None # sample lines from file
+ self._event_lines = None # event messages from file
+ self._system_lines = None # unparsed lines of system messages from file
self._tracking_mode = None # assigned in self._infer_col_names
self._meas_date = None
self._rec_info = None
+ if gap_desc is None:
+ gap_desc = "BAD_ACQ_SKIP"
+ else:
+ warn(
+ "gap_description is deprecated in 1.5 and will be removed in 1.6, "
+ "use raw.annotations.rename to use a description other than "
+ "'BAD_ACQ_SKIP'",
+ FutureWarning,
+ )
self._gap_desc = gap_desc
self.dataframes = {}
diff --git a/mne/io/eyelink/tests/test_eyelink.py b/mne/io/eyelink/tests/test_eyelink.py
index 51d64ea5ed5..c16970a26dc 100644
--- a/mne/io/eyelink/tests/test_eyelink.py
+++ b/mne/io/eyelink/tests/test_eyelink.py
@@ -28,6 +28,7 @@ def test_eyetrack_not_data_ch():
@pytest.mark.parametrize(
"fname, create_annotations, find_overlaps",
[
+ (fname, False, False),
(fname, False, False),
(fname, True, False),
(fname, True, True),
@@ -37,7 +38,9 @@ def test_eyetrack_not_data_ch():
def test_eyelink(fname, create_annotations, find_overlaps):
"""Test reading eyelink asc files."""
raw = read_raw_eyelink(
- fname, create_annotations=create_annotations, find_overlaps=find_overlaps
+ fname,
+ create_annotations=create_annotations,
+ find_overlaps=find_overlaps,
)
# First, tests that shouldn't change based on function arguments
diff --git a/mne/preprocessing/eyetracking/__init__.py b/mne/preprocessing/eyetracking/__init__.py
index 7c7f5f42765..c232475b2fc 100644
--- a/mne/preprocessing/eyetracking/__init__.py
+++ b/mne/preprocessing/eyetracking/__init__.py
@@ -5,3 +5,4 @@
# License: BSD-3-Clause
from .eyetracking import set_channel_types_eyetrack
+from .calibration import Calibration, read_eyelink_calibration
diff --git a/mne/preprocessing/eyetracking/calibration.py b/mne/preprocessing/eyetracking/calibration.py
new file mode 100644
index 00000000000..d6002eaa1f8
--- /dev/null
+++ b/mne/preprocessing/eyetracking/calibration.py
@@ -0,0 +1,229 @@
+"""Eyetracking Calibration(s) class constructor."""
+
+# Authors: Scott Huberty
+# Eric Larson
+# Adapted from: https://github.com/pyeparse/pyeparse
+# License: BSD-3-Clause
+
+from copy import deepcopy
+
+import numpy as np
+
+from ...utils import _check_fname, _validate_type, fill_doc, logger
+from ...viz.utils import plt_show
+
+
+@fill_doc
+class Calibration(dict):
+ """Eye-tracking calibration info.
+
+ This data structure behaves like a dictionary. It contains information regarding a
+ calibration that was conducted during an eye-tracking recording.
+
+ .. note::
+ When possible, a Calibration instance should be created with a helper function,
+ such as :func:`~mne.preprocessing.eyetracking.read_eyelink_calibration`.
+
+ Parameters
+ ----------
+ onset : float
+ The onset of the calibration in seconds. If the calibration was
+ performed before the recording started, the onset can be
+ negative.
+ model : str
+ A string, which is the model of the eye-tracking calibration that was applied.
+ For example ``'H3'`` for a horizontal only 3-point calibration, or ``'HV3'``
+ for a horizontal and vertical 3-point calibration.
+ eye : str
+ The eye that was calibrated. For example, ``'left'``, or ``'right'``.
+ avg_error : float
+ The average error in degrees between the calibration positions and the
+ actual gaze position.
+ max_error : float
+ The maximum error in degrees that occurred between the calibration
+ positions and the actual gaze position.
+ positions : array-like of float, shape ``(n_calibration_points, 2)``
+ The x and y coordinates of the calibration points.
+ offsets : array-like of float, shape ``(n_calibration_points,)``
+ The error in degrees between the calibration position and the actual
+ gaze position for each calibration point.
+ gaze : array-like of float, shape ``(n_calibration_points, 2)``
+ The x and y coordinates of the actual gaze position for each calibration point.
+ screen_size : array-like of shape ``(2,)``
+ The width and height (in meters) of the screen that the eyetracking
+ data was collected with. For example ``(.531, .298)`` for a monitor with
+ a display area of 531 x 298 mm.
+ screen_distance : float
+ The distance (in meters) from the participant's eyes to the screen.
+ screen_resolution : array-like of shape ``(2,)``
+ The resolution (in pixels) of the screen that the eyetracking data
+ was collected with. For example, ``(1920, 1080)`` for a 1920x1080
+ resolution display.
+ """
+
+ def __init__(
+ self,
+ *,
+ onset,
+ model,
+ eye,
+ avg_error,
+ max_error,
+ positions,
+ offsets,
+ gaze,
+ screen_size=None,
+ screen_distance=None,
+ screen_resolution=None,
+ ):
+ super().__init__(
+ onset=onset,
+ model=model,
+ eye=eye,
+ avg_error=avg_error,
+ max_error=max_error,
+ screen_size=screen_size,
+ screen_distance=screen_distance,
+ screen_resolution=screen_resolution,
+ positions=positions,
+ offsets=offsets,
+ gaze=gaze,
+ )
+
+ def __repr__(self):
+ """Return a summary of the Calibration object."""
+ return (
+ f"Calibration |\n"
+ f" onset: {self['onset']} seconds\n"
+ f" model: {self['model']}\n"
+ f" eye: {self['eye']}\n"
+ f" average error: {self['avg_error']} degrees\n"
+ f" max error: {self['max_error']} degrees\n"
+ f" screen size: {self['screen_size']} meters\n"
+ f" screen distance: {self['screen_distance']} meters\n"
+ f" screen resolution: {self['screen_resolution']} pixels\n"
+ )
+
+ def copy(self):
+ """Copy the instance.
+
+ Returns
+ -------
+ cal : instance of Calibration
+ The copied Calibration.
+ """
+ return deepcopy(self)
+
+ def plot(self, title=None, show_offsets=True, axes=None, show=True):
+ """Visualize calibration.
+
+ Parameters
+ ----------
+ title : str
+ The title to be displayed. Defaults to ``None``, which uses a generic title.
+ show_offsets : bool
+ Whether to display the offset (in visual degrees) of each calibration
+ point or not. Defaults to ``True``.
+ axes : instance of matplotlib.axes.Axes | None
+ Axes to draw the calibration positions to. If ``None`` (default), a new axes
+ will be created.
+ show : bool
+ Whether to show the figure or not. Defaults to ``True``.
+
+ Returns
+ -------
+ fig : instance of matplotlib.figure.Figure
+ The resulting figure object for the calibration plot.
+ """
+ import matplotlib.pyplot as plt
+
+ msg = "positions and gaze keys must both be 2D numpy arrays."
+ assert isinstance(self["positions"], np.ndarray), msg
+ assert isinstance(self["gaze"], np.ndarray), msg
+
+ if axes is not None:
+ from matplotlib.axes import Axes
+
+ _validate_type(axes, Axes, "axes")
+ ax = axes
+ fig = ax.get_figure()
+ else: # create new figure and axes
+ fig, ax = plt.subplots(constrained_layout=True)
+ px, py = self["positions"].T
+ gaze_x, gaze_y = self["gaze"].T
+
+ if title is None:
+ ax.set_title(f"Calibration ({self['eye']} eye)")
+ else:
+ ax.set_title(title)
+ ax.set_xlabel("x (pixels)")
+ ax.set_ylabel("y (pixels)")
+
+ # Display avg_error and max_error in the top left corner
+ text = (
+ f"avg_error: {self['avg_error']} deg.\nmax_error: {self['max_error']} deg."
+ )
+ ax.text(
+ 0,
+ 1.01,
+ text,
+ transform=ax.transAxes,
+ verticalalignment="baseline",
+ fontsize=8,
+ )
+
+ # Invert y-axis because the origin is in the top left corner
+ ax.invert_yaxis()
+ ax.scatter(px, py, color="gray")
+ ax.scatter(gaze_x, gaze_y, color="red", alpha=0.5)
+
+ if show_offsets:
+ for i in range(len(px)):
+ x_offset = 0.01 * gaze_x[i] # 1% to the right of the gazepoint
+ text = ax.text(
+ x=gaze_x[i] + x_offset,
+ y=gaze_y[i],
+ s=self["offsets"][i],
+ fontsize=8,
+ ha="left",
+ va="center",
+ )
+
+ plt_show(show)
+ return fig
+
+
+@fill_doc
+def read_eyelink_calibration(
+ fname, screen_size=None, screen_distance=None, screen_resolution=None
+):
+ """Return info on calibrations collected in an eyelink file.
+
+ Parameters
+ ----------
+ fname : path-like
+ Path to the eyelink file (.asc).
+ screen_size : array-like of shape ``(2,)``
+ The width and height (in meters) of the screen that the eyetracking
+ data was collected with. For example ``(.531, .298)`` for a monitor with
+ a display area of 531 x 298 mm. Defaults to ``None``.
+ screen_distance : float
+ The distance (in meters) from the participant's eyes to the screen.
+ Defaults to ``None``.
+ screen_resolution : array-like of shape ``(2,)``
+ The resolution (in pixels) of the screen that the eyetracking data
+ was collected with. For example, ``(1920, 1080)`` for a 1920x1080
+ resolution display. Defaults to ``None``.
+
+ Returns
+ -------
+ calibrations : list
+ A list of :class:`~mne.preprocessing.eyetracking.Calibration` instances, one for
+ each eye of every calibration that was performed during the recording session.
+ """
+ from ...io.eyelink._utils import _parse_calibration
+
+ fname = _check_fname(fname, overwrite="read", must_exist=True, name="fname")
+ logger.info("Reading calibration data from {}".format(fname))
+ lines = fname.read_text(encoding="ASCII").splitlines()
+ return _parse_calibration(lines, screen_size, screen_distance, screen_resolution)
diff --git a/mne/preprocessing/eyetracking/tests/__init__.py b/mne/preprocessing/eyetracking/tests/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/mne/preprocessing/eyetracking/tests/test_calibration.py b/mne/preprocessing/eyetracking/tests/test_calibration.py
new file mode 100644
index 00000000000..21a0d8b35ea
--- /dev/null
+++ b/mne/preprocessing/eyetracking/tests/test_calibration.py
@@ -0,0 +1,247 @@
+import pytest
+
+import numpy as np
+
+from mne.datasets.testing import data_path, requires_testing_data
+from ..calibration import Calibration, read_eyelink_calibration
+
+# for test_read_eyelink_calibration
+testing_path = data_path(download=False)
+fname = testing_path / "eyetrack" / "test_eyelink.asc"
+
+# for test_create_calibration
+POSITIONS = np.array([[115.0, 540.0], [960.0, 540.0], [1804.0, 540.0]])
+OFFSETS = np.array([0.42, 0.23, 0.17])
+GAZES = np.array([[101.5, 554.8], [9.9, -4.1], [1795.9, 539.0]])
+
+EXPECTED_REPR = (
+ "Calibration |\n"
+ " onset: 0 seconds\n"
+ " model: H3\n"
+ " eye: right\n"
+ " average error: 0.5 degrees\n"
+ " max error: 1.0 degrees\n"
+ " screen size: (0.531, 0.298) meters\n"
+ " screen distance: 0.065 meters\n"
+ " screen resolution: (1920, 1080) pixels\n"
+)
+
+
+@pytest.mark.parametrize(
+ (
+ "onset, model, eye, avg_error, max_error, positions, offsets, gaze,"
+ " screen_size, screen_distance, screen_resolution"
+ ),
+ [
+ (
+ 0,
+ "H3",
+ "right",
+ 0.5,
+ 1.0,
+ POSITIONS,
+ OFFSETS,
+ GAZES,
+ (0.531, 0.298),
+ 0.065,
+ (1920, 1080),
+ ),
+ (None, None, None, None, None, None, None, None, None, None, None),
+ ],
+)
+def test_create_calibration(
+ onset,
+ model,
+ eye,
+ avg_error,
+ max_error,
+ positions,
+ offsets,
+ gaze,
+ screen_size,
+ screen_distance,
+ screen_resolution,
+):
+ """Test creating a Calibration object."""
+ kwargs = dict(
+ onset=onset,
+ model=model,
+ eye=eye,
+ avg_error=avg_error,
+ max_error=max_error,
+ positions=positions,
+ offsets=offsets,
+ gaze=gaze,
+ screen_size=screen_size,
+ screen_distance=screen_distance,
+ screen_resolution=screen_resolution,
+ )
+ cal = Calibration(**kwargs)
+ assert cal["onset"] == onset
+ assert cal["model"] == model
+ assert cal["eye"] == eye
+ assert cal["avg_error"] == avg_error
+ assert cal["max_error"] == max_error
+ if positions is not None:
+ assert isinstance(cal["positions"], np.ndarray)
+ assert np.array_equal(cal["positions"], np.array(POSITIONS))
+ else:
+ assert cal["positions"] is None
+ if offsets is not None:
+ assert isinstance(cal["offsets"], np.ndarray)
+ assert np.array_equal(cal["offsets"], np.array(OFFSETS))
+ if gaze is not None:
+ assert isinstance(cal["gaze"], np.ndarray)
+ assert np.array_equal(cal["gaze"], np.array(GAZES))
+ assert cal["screen_size"] == screen_size
+ assert cal["screen_distance"] == screen_distance
+ assert cal["screen_resolution"] == screen_resolution
+ # test copy method
+ copied_obj = cal.copy()
+ # Check if the copied object is an instance of Calibration
+ assert isinstance(copied_obj, Calibration)
+ # Check if the an attribute of the copied object is equal to the original object
+ assert copied_obj["onset"] == cal["onset"]
+ # Modify the copied object and check if it is independent from the original object
+ copied_obj["onset"] = 20
+ assert copied_obj["onset"] != cal["onset"]
+ # test __repr__
+ if cal["onset"] is not None:
+ assert repr(cal) == EXPECTED_REPR # test __repr__
+
+
+@requires_testing_data
+@pytest.mark.parametrize("fname", [(fname)])
+def test_read_calibration(fname):
+ """Test reading calibration data from an eyelink asc file."""
+ calibrations = read_eyelink_calibration(fname)
+ # These numbers were pulled from the file and confirmed.
+ POSITIONS_L = (
+ [960, 540],
+ [960, 92],
+ [960, 987],
+ [115, 540],
+ [1804, 540],
+ [216, 145],
+ [1703, 145],
+ [216, 934],
+ [1703, 934],
+ [537, 316],
+ [1382, 316],
+ [537, 763],
+ [1382, 763],
+ )
+
+ DIFF_L = (
+ [9.9, -4.1],
+ [-7.8, 16.0],
+ [-1.9, -14.2],
+ [13.5, -14.8],
+ [8.1, 1.0],
+ [-7.0, -15.4],
+ [-10.1, -1.4],
+ [-0.3, 6.9],
+ [-32.3, -28.1],
+ [8.2, 7.6],
+ [9.6, 2.1],
+ [-10.6, -2.0],
+ [-11.8, 8.4],
+ )
+ GAZE_L = np.array(POSITIONS_L) + np.array(DIFF_L)
+
+ POSITIONS_R = (
+ [960, 540],
+ [960, 92],
+ [960, 987],
+ [115, 540],
+ [1804, 540],
+ [216, 145],
+ [1703, 145],
+ [216, 934],
+ [1703, 934],
+ [537, 316],
+ [1382, 316],
+ [537, 763],
+ [1382, 763],
+ )
+ DIFF_R = (
+ [-5.2, -16.1],
+ [23.7, 1.3],
+ [2.0, -9.3],
+ [4.4, 1.5],
+ [-6.5, -12.7],
+ [16.6, -7.5],
+ [5.7, -1.8],
+ [15.4, -3.5],
+ [-2.0, -10.2],
+ [0.1, 8.3],
+ [1.9, -15.8],
+ [-24.8, -2.3],
+ [3.2, -9.2],
+ )
+ GAZE_R = np.array(POSITIONS_R) + np.array(DIFF_R)
+
+ OFFSETS_R = [
+ 0.36,
+ 0.50,
+ 0.20,
+ 0.10,
+ 0.30,
+ 0.38,
+ 0.13,
+ 0.33,
+ 0.22,
+ 0.18,
+ 0.34,
+ 0.52,
+ 0.21,
+ ]
+
+ assert len(calibrations) == 2 # calibration[0] is left, calibration[1] is right
+ assert calibrations[0]["onset"] == 0
+ assert calibrations[1]["onset"] == 0
+ assert calibrations[0]["model"] == "HV13"
+ assert calibrations[1]["model"] == "HV13"
+ assert calibrations[0]["eye"] == "left"
+ assert calibrations[1]["eye"] == "right"
+ assert calibrations[0]["avg_error"] == 0.30
+ assert calibrations[0]["max_error"] == 0.90
+ assert calibrations[1]["avg_error"] == 0.31
+ assert calibrations[1]["max_error"] == 0.52
+ assert np.array_equal(POSITIONS_L, calibrations[0]["positions"])
+ assert np.array_equal(POSITIONS_R, calibrations[1]["positions"])
+ assert np.array_equal(GAZE_L, calibrations[0]["gaze"])
+ assert np.array_equal(GAZE_R, calibrations[1]["gaze"])
+ assert np.array_equal(OFFSETS_R, calibrations[1]["offsets"])
+
+
+@requires_testing_data
+@pytest.mark.parametrize(
+ "fname, axes",
+ [(fname, None), (fname, True)],
+)
+def test_plot_calibration(fname, axes):
+ """Test plotting calibration data."""
+ import matplotlib.pyplot as plt
+
+ # Set the non-interactive backend
+ plt.switch_backend("agg")
+
+ if axes:
+ axes = plt.subplot()
+ calibrations = read_eyelink_calibration(fname)
+ cal_left = calibrations[0]
+ fig = cal_left.plot(show=True, show_offsets=True, axes=axes)
+ ax = fig.axes[0]
+
+ scatter1 = ax.collections[0]
+ scatter2 = ax.collections[1]
+ px, py = cal_left["positions"].T
+ gaze_x, gaze_y = cal_left["gaze"].T
+
+ assert ax.title.get_text() == f"Calibration ({cal_left['eye']} eye)"
+ assert len(ax.collections) == 2 # Two scatter plots
+
+ assert np.allclose(scatter1.get_offsets(), np.column_stack((px, py)))
+ assert np.allclose(scatter2.get_offsets(), np.column_stack((gaze_x, gaze_y)))
+ plt.close(fig)
diff --git a/mne/preprocessing/stim.py b/mne/preprocessing/stim.py
index 3448a82a3ad..cd78ae327f7 100644
--- a/mne/preprocessing/stim.py
+++ b/mne/preprocessing/stim.py
@@ -14,7 +14,7 @@
def _get_window(start, end):
"""Return window which has length as much as parameter start - end."""
- from scipy.signal import hann
+ from scipy.signal.windows import hann
window = 1 - np.r_[hann(4)[:2], np.ones(np.abs(end - start) - 4), hann(4)[-2:]].T
return window
diff --git a/mne/report/tests/test_report.py b/mne/report/tests/test_report.py
index c7357b53c41..789fef10825 100644
--- a/mne/report/tests/test_report.py
+++ b/mne/report/tests/test_report.py
@@ -401,7 +401,10 @@ def test_report_raw_psd_and_date(tmp_path):
def test_render_add_sections(renderer, tmp_path):
"""Test adding figures/images to section."""
pytest.importorskip("nibabel")
- from pyvista.plotting import plotting
+ try:
+ from pyvista.plotting.plotter import _ALL_PLOTTERS
+ except Exception: # PV < 0.40
+ from pyvista.plotting.plotting import _ALL_PLOTTERS
report = Report(subjects_dir=subjects_dir)
# Check add_figure functionality
@@ -427,15 +430,15 @@ def test_render_add_sections(renderer, tmp_path):
report.add_image(image="foobar.xxx", title="H")
evoked = read_evokeds(evoked_fname, condition="Left Auditory", baseline=(-0.2, 0.0))
- n_before = len(plotting._ALL_PLOTTERS)
+ n_before = len(_ALL_PLOTTERS)
fig = plot_alignment(
evoked.info, trans_fname, subject="sample", subjects_dir=subjects_dir
)
n_after = n_before + 1
- assert n_after == len(plotting._ALL_PLOTTERS)
+ assert n_after == len(_ALL_PLOTTERS)
report.add_figure(fig=fig, title="random image")
- assert n_after == len(plotting._ALL_PLOTTERS) # not closed
+ assert n_after == len(_ALL_PLOTTERS) # not closed
assert repr(report)
fname = tmp_path / "test.html"
report.save(fname, open_browser=False)
diff --git a/mne/stats/tests/test_permutations.py b/mne/stats/tests/test_permutations.py
index 2a0b86381b7..245ac140182 100644
--- a/mne/stats/tests/test_permutations.py
+++ b/mne/stats/tests/test_permutations.py
@@ -5,6 +5,7 @@
from numpy.testing import assert_array_equal, assert_allclose
import numpy as np
from scipy import stats, sparse
+import pytest
from mne.stats import permutation_cluster_1samp_test
from mne.stats.permutations import (
@@ -55,9 +56,25 @@ def test_permutation_t_test():
assert_allclose(t_obs_clust, t_obs)
assert_allclose(p_values_clust, p_values[keep], atol=1e-2)
+
+@pytest.mark.parametrize(
+ "tail_name,tail_code",
+ [
+ ("two-sided", 0),
+ pytest.param(
+ "less", -1, marks=pytest.mark.xfail(reason="Bug in permutation function")
+ ),
+ pytest.param(
+ "greater", 1, marks=pytest.mark.xfail(reason="Bug in permutation function")
+ ),
+ ],
+)
+def test_permutation_t_test_tail(tail_name, tail_code):
+ """Test that tails work properly."""
X = np.random.randn(18, 1)
- t_obs, p_values, H0 = permutation_t_test(X, n_permutations="all")
- t_obs_scipy, p_values_scipy = stats.ttest_1samp(X[:, 0], 0)
+
+ t_obs, p_values, _ = permutation_t_test(X, n_permutations="all", tail=tail_code)
+ t_obs_scipy, p_values_scipy = stats.ttest_1samp(X[:, 0], 0, alternative=tail_name)
assert_allclose(t_obs[0], t_obs_scipy, 8)
assert_allclose(p_values[0], p_values_scipy, rtol=1e-2)
diff --git a/mne/stats/tests/test_regression.py b/mne/stats/tests/test_regression.py
index 27ca66fd915..02517392aa8 100644
--- a/mne/stats/tests/test_regression.py
+++ b/mne/stats/tests/test_regression.py
@@ -8,7 +8,7 @@
from numpy.testing import assert_array_equal, assert_allclose, assert_equal
import pytest
-from scipy.signal import hann
+from scipy.signal.windows import hann
import mne
from mne import read_source_estimate
diff --git a/mne/surface.py b/mne/surface.py
index 8f76e06ffdd..662ce9992a0 100644
--- a/mne/surface.py
+++ b/mne/surface.py
@@ -1917,20 +1917,23 @@ def _marching_cubes(image, level, smooth=0, fill_hole_size=None, use_flying_edge
f"{level.size} elements"
)
+ # vtkImageData indexes as slice, row, col (Z, Y, X):
+ # https://discourse.vtk.org/t/very-confused-about-imdata-matrix-index-order/6608/2
+ # We can accomplish this by raveling with order='F' later, so we might as
+ # well make a copy with Fortran order now.
+ # We also use double as passing integer types directly can be problematic!
+ image = np.array(image, dtype=float, order="F")
+ image_shape = image.shape
+
# fill holes
if fill_hole_size is not None:
- image = image.copy() # don't modify original
for val in level:
bin_image = image == val
mask = image == 0 # don't go into other areas
bin_image = binary_dilation(bin_image, iterations=fill_hole_size, mask=mask)
image[bin_image] = val
- # force double as passing integer types directly can be problematic!
- image_shape = image.shape
- # use order='A' to automatically detect when Fortran ordering is needed
- data_vtk = numpy_to_vtk(image.ravel(order="A").astype(float), deep=True)
- del image
+ data_vtk = numpy_to_vtk(image.ravel(order="F"), deep=False)
mc = vtkDiscreteFlyingEdges3D() if use_flying_edges else vtkDiscreteMarchingCubes()
# create image
@@ -1978,8 +1981,8 @@ def _marching_cubes(image, level, smooth=0, fill_hole_size=None, use_flying_edge
polydata = geometry.GetOutput()
rr = vtk_to_numpy(polydata.GetPoints().GetData())
tris = vtk_to_numpy(polydata.GetPolys().GetConnectivityArray()).reshape(-1, 3)
- rr = np.ascontiguousarray(rr[:, ::-1])
- tris = np.ascontiguousarray(tris[:, ::-1])
+ rr = np.ascontiguousarray(rr)
+ tris = np.ascontiguousarray(tris)
out.append((rr, tris))
return out
diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py
index 0dd35b64cf3..8cd7a9c9707 100644
--- a/mne/tests/test_epochs.py
+++ b/mne/tests/test_epochs.py
@@ -1349,7 +1349,7 @@ def test_epochs_io_preload(tmp_path, preload):
epochs_no_bl.save(temp_fname_no_bl, overwrite=True)
epochs_read = read_epochs(temp_fname)
epochs_no_bl_read = read_epochs(temp_fname_no_bl)
- with pytest.raises(ValueError, match="invalid"):
+ with pytest.raises(ValueError, match="exactly two elements"):
epochs.apply_baseline(baseline=[1, 2, 3])
epochs_with_bl = epochs_no_bl_read.copy().apply_baseline(baseline)
assert isinstance(epochs_with_bl, BaseEpochs)
diff --git a/mne/tests/test_event.py b/mne/tests/test_event.py
index af9cd2bc7c9..9bd8a483883 100644
--- a/mne/tests/test_event.py
+++ b/mne/tests/test_event.py
@@ -424,6 +424,12 @@ def test_pick_events():
[[1, 0, 1], [2, 1, 0], [4, 4, 2], [5, 2, 0]],
)
+ with pytest.raises(TypeError, match="must be an integer or a list"):
+ pick_events(events, include=1.2)
+
+ with pytest.raises(TypeError, match="must be an integer or a list"):
+ pick_events(events, include={"a": 1})
+
def test_make_fixed_length_events():
"""Test making events of a fixed length."""
diff --git a/mne/tests/test_surface.py b/mne/tests/test_surface.py
index 756e3ad1059..d3b34adf211 100644
--- a/mne/tests/test_surface.py
+++ b/mne/tests/test_surface.py
@@ -245,12 +245,13 @@ def test_normal_orth():
# 0.06 s locally even with all these params
@pytest.mark.parametrize("dtype", (np.float64, np.uint16, ">i4"))
+@pytest.mark.parametrize("order", "FC")
@pytest.mark.parametrize("value", (1, 12))
@pytest.mark.parametrize("smooth", (0, 0.9))
-def test_marching_cubes(dtype, value, smooth):
+def test_marching_cubes(dtype, value, smooth, order):
"""Test creating surfaces via marching cubes."""
pytest.importorskip("pyvista")
- data = np.zeros((50, 50, 50), dtype=dtype)
+ data = np.zeros((50, 50, 50), dtype=dtype, order=order)
data[20:30, 20:30, 20:30] = value
level = [value]
out = _marching_cubes(data, level, smooth=smooth)
@@ -260,8 +261,7 @@ def test_marching_cubes(dtype, value, smooth):
rtol = 1e-2 if smooth else 1e-9
assert_allclose(verts.sum(axis=0), [14700, 14700, 14700], rtol=rtol)
tri_sum = triangles.sum(axis=0).tolist()
- # old VTK (9.2.6), new VTK
- assert tri_sum in [[363402, 360865, 350588], [364089, 359867, 350408]]
+ assert tri_sum in ([350588, 360865, 363402], [350408, 359867, 364089])
# test fill holes
data[24:27, 24:27, 24:27] = 0
verts, triangles = _marching_cubes(data, level, smooth=smooth, fill_hole_size=2)[0]
diff --git a/mne/utils/__init__.py b/mne/utils/__init__.py
index 0f7591b4f5a..92a9a7212b5 100644
--- a/mne/utils/__init__.py
+++ b/mne/utils/__init__.py
@@ -28,6 +28,7 @@
_check_if_nan,
_is_numeric,
_ensure_int,
+ _check_integer_or_list,
_check_preload,
_validate_type,
_check_range,
diff --git a/mne/utils/check.py b/mne/utils/check.py
index cbb2a3b5f57..51b72923ba2 100644
--- a/mne/utils/check.py
+++ b/mne/utils/check.py
@@ -4,6 +4,7 @@
# License: BSD-3-Clause
from builtins import input # no-op here but facilitates testing
+from collections.abc import Sequence
from difflib import get_close_matches
from importlib import import_module
import operator
@@ -35,6 +36,16 @@ def _ensure_int(x, name="unknown", must_be="an int", *, extra=""):
return x
+def _check_integer_or_list(arg, name):
+ """Validate arguments that should be an integer or a list.
+
+ Always returns a list.
+ """
+ if not isinstance(arg, list):
+ arg = [_ensure_int(arg, name=name, must_be="an integer or a list")]
+ return arg
+
+
def check_fname(fname, filetype, endings, endings_err=()):
"""Enforce MNE filename conventions.
@@ -247,7 +258,7 @@ def _check_fname(
if not os.access(fname, os.R_OK):
raise PermissionError(f"{name} does not have read permissions: {fname}")
elif must_exist:
- raise FileNotFoundError(f"{name} does not exist: {fname}")
+ raise FileNotFoundError(f'{name} does not exist: "{fname}"')
return fname
@@ -515,6 +526,7 @@ def __instancecheck__(cls, other):
"path-like": path_like,
"int-like": (int_like,),
"callable": (_Callable(),),
+ "array-like": (Sequence, np.ndarray),
}
@@ -528,9 +540,8 @@ def _validate_type(item, types=None, item_name=None, type_name=None, *, extra=""
types : type | str | tuple of types | tuple of str
The types to be checked against.
If str, must be one of {'int', 'int-like', 'str', 'numeric', 'info',
- 'path-like', 'callable'}.
- If a tuple of str is passed, use 'int-like' and not 'int' for
- integers.
+ 'path-like', 'callable', 'array-like'}.
+ If a tuple of str is passed, use 'int-like' and not 'int' for integers.
item_name : str | None
Name of the item to show inside the error message.
type_name : str | None
diff --git a/mne/utils/docs.py b/mne/utils/docs.py
index 6369c38a31e..9a22144ab44 100644
--- a/mne/utils/docs.py
+++ b/mne/utils/docs.py
@@ -2351,7 +2351,7 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75):
] = """
mapping : dict | callable
A dictionary mapping the old channel to a new channel name
- e.g. {'EEG061' : 'EEG161'}. Can also be a callable function
+ e.g. ``{'EEG061' : 'EEG161'}``. Can also be a callable function
that takes and returns a string.
.. versionchanged:: 0.10.0
diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py
index ca37b6ce116..b0ebce4a50c 100644
--- a/mne/viz/_brain/_brain.py
+++ b/mne/viz/_brain/_brain.py
@@ -433,8 +433,12 @@ def __init__(
normals=self.geo[h].nn,
)
mesh.map() # send to GPU
+ if self.geo[h].bin_curv is None:
+ scalars = mesh._default_scalars[:, 0]
+ else:
+ scalars = self.geo[h].bin_curv
mesh.add_overlay(
- scalars=self.geo[h].bin_curv,
+ scalars=scalars,
colormap=geo_kwargs["colormap"],
rng=[geo_kwargs["vmin"], geo_kwargs["vmax"]],
opacity=alpha,
diff --git a/mne/viz/_brain/surface.py b/mne/viz/_brain/surface.py
index 07ea77a6fd1..ce7bb9c974a 100644
--- a/mne/viz/_brain/surface.py
+++ b/mne/viz/_brain/surface.py
@@ -175,12 +175,17 @@ def z(self):
def load_curvature(self):
"""Load in curvature values from the ?h.curv file."""
curv_path = path.join(self.data_path, "surf", "%s.curv" % self.hemi)
- self.curv = read_curvature(curv_path, binary=False)
- self.bin_curv = np.array(self.curv > 0, np.int64)
+ if path.isfile(curv_path):
+ self.curv = read_curvature(curv_path, binary=False)
+ self.bin_curv = np.array(self.curv > 0, np.int64)
+ color = (self.curv > 0).astype(float)
+ else:
+ self.curv = None
+ self.bin_curv = None
+ color = np.ones((self.coords.shape[0]))
# morphometry (curvature) normalization in order to get gray cortex
# TODO: delete self.grey_curv after cortex parameter
# will be fully supported
- color = (self.curv > 0).astype(float)
color = 0.5 - (color - 0.5) / 3
color = color[:, np.newaxis] * [1, 1, 1]
self.grey_curv = color
diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py
index a9ef1743d3c..a4ad70e41a0 100644
--- a/mne/viz/_brain/tests/test_brain.py
+++ b/mne/viz/_brain/tests/test_brain.py
@@ -424,26 +424,31 @@ def __init__(self):
info["chs"][0]["coord_frame"] = 99
with pytest.raises(RuntimeError, match='must be "meg", "head" or "mri"'):
brain.add_sensors(info, trans=fname_trans)
+ brain.close()
# test sEEG projection onto inflated
# make temp path to fake pial surface
os.makedirs(tmp_path / subject / "surf", exist_ok=True)
for hemi in ("lh", "rh"):
- # fake white surface for pial
+ # fake white surface for pial, and no .curv file
copyfile(
subjects_dir / subject / "surf" / f"{hemi}.white",
tmp_path / subject / "surf" / f"{hemi}.pial",
)
- copyfile(
- subjects_dir / subject / "surf" / f"{hemi}.curv",
- tmp_path / subject / "surf" / f"{hemi}.curv",
- )
copyfile(
subjects_dir / subject / "surf" / f"{hemi}.inflated",
tmp_path / subject / "surf" / f"{hemi}.inflated",
)
-
- brain._subjects_dir = tmp_path
+ brain = Brain(
+ hemi=hemi,
+ surf=surf,
+ size=size,
+ title=title,
+ cortex=cortex,
+ units="m",
+ subject=subject,
+ subjects_dir=tmp_path,
+ )
proj_info = create_info([f"Ch{i}" for i in range(1, 7)], 1000, "seeg")
pos = (
np.array(
diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py
index ecfc8bc3f01..affb1f34bfd 100644
--- a/mne/viz/backends/_pyvista.py
+++ b/mne/viz/backends/_pyvista.py
@@ -43,7 +43,11 @@
import pyvista
from pyvista import Plotter, PolyData, Line, close_all, UnstructuredGrid
from pyvistaqt import BackgroundPlotter
- from pyvista.plotting.plotting import _ALL_PLOTTERS
+
+ try:
+ from pyvista.plotting.plotter import _ALL_PLOTTERS
+ except Exception: # PV < 0.40
+ from pyvista.plotting.plotting import _ALL_PLOTTERS
from vtkmodules.vtkCommonCore import vtkCommand, vtkLookupTable, VTK_UNSIGNED_CHAR
from vtkmodules.vtkCommonDataModel import VTK_VERTEX, vtkPiecewiseFunction
@@ -989,7 +993,10 @@ def _volume(
center,
):
# Now we can actually construct the visualization
- grid = pyvista.UniformGrid()
+ try:
+ grid = pyvista.ImageData()
+ except AttributeError: # PV < 0.40
+ grid = pyvista.UniformGrid()
grid.dimensions = dimensions + 1 # inject data on the cells
grid.origin = origin
grid.spacing = spacing
diff --git a/mne/viz/backends/_qt.py b/mne/viz/backends/_qt.py
index 07b2da7aaf6..49dd20d5dbd 100644
--- a/mne/viz/backends/_qt.py
+++ b/mne/viz/backends/_qt.py
@@ -269,7 +269,8 @@ class _Button(QPushButton, _AbstractButton, _Widget, metaclass=_BaseWidget):
def __init__(self, value, callback, icon=None):
_AbstractButton.__init__(value=value, callback=callback)
_Widget.__init__(self)
- QPushButton.__init__(self)
+ with _disabled_init(_AbstractButton):
+ QPushButton.__init__(self)
self.setText(value)
self.released.connect(callback)
if icon:
@@ -288,7 +289,8 @@ def __init__(self, value, rng, callback, horizontal=True):
value=value, rng=rng, callback=callback, horizontal=horizontal
)
_Widget.__init__(self)
- QSlider.__init__(self, Qt.Horizontal if horizontal else Qt.Vertical)
+ with _disabled_init(_AbstractSlider):
+ QSlider.__init__(self, Qt.Horizontal if horizontal else Qt.Vertical)
self.setMinimum(rng[0])
self.setMaximum(rng[1])
self.setValue(value)
@@ -322,7 +324,8 @@ class _CheckBox(QCheckBox, _AbstractCheckBox, _Widget, metaclass=_BaseWidget):
def __init__(self, value, callback):
_AbstractCheckBox.__init__(value=value, callback=callback)
_Widget.__init__(self)
- QCheckBox.__init__(self)
+ with _disabled_init(_AbstractCheckBox):
+ QCheckBox.__init__(self)
self.setChecked(value)
self.stateChanged.connect(lambda x: callback(bool(x)))
@@ -337,7 +340,8 @@ class _SpinBox(QDoubleSpinBox, _AbstractSpinBox, _Widget, metaclass=_BaseWidget)
def __init__(self, value, rng, callback, step=None):
_AbstractSpinBox.__init__(value=value, rng=rng, callback=callback, step=step)
_Widget.__init__(self)
- QDoubleSpinBox.__init__(self)
+ with _disabled_init(_AbstractSpinBox):
+ QDoubleSpinBox.__init__(self)
self.setAlignment(Qt.AlignCenter)
self.setMinimum(rng[0])
self.setMaximum(rng[1])
@@ -360,7 +364,8 @@ class _ComboBox(QComboBox, _AbstractComboBox, _Widget, metaclass=_BaseWidget):
def __init__(self, value, items, callback):
_AbstractComboBox.__init__(value=value, items=items, callback=callback)
_Widget.__init__(self)
- QComboBox.__init__(self)
+ with _disabled_init(_AbstractComboBox):
+ QComboBox.__init__(self)
self.addItems(items)
self.setCurrentText(value)
self.currentTextChanged.connect(callback)
@@ -377,7 +382,8 @@ class _RadioButtons(QVBoxLayout, _AbstractRadioButtons, _Widget, metaclass=_Base
def __init__(self, value, items, callback):
_AbstractRadioButtons.__init__(value=value, items=items, callback=callback)
_Widget.__init__(self)
- QVBoxLayout.__init__(self)
+ with _disabled_init(_AbstractRadioButtons):
+ QVBoxLayout.__init__(self)
self._button_group = QButtonGroup()
self._button_group.setExclusive(True)
for val in items:
@@ -401,7 +407,8 @@ class _GroupBox(QGroupBox, _AbstractGroupBox, _Widget, metaclass=_BaseWidget):
def __init__(self, name, items):
_AbstractGroupBox.__init__(name=name, items=items)
_Widget.__init__(self)
- QGroupBox.__init__(self, name)
+ with _disabled_init(_AbstractGroupBox):
+ QGroupBox.__init__(self, name)
self._layout = _VBoxLayout()
for item in items:
self._layout._add_widget(item)
@@ -455,7 +462,8 @@ class _PlayMenu(QVBoxLayout, _AbstractPlayMenu, _Widget, metaclass=_BaseWidget):
def __init__(self, value, rng, callback):
_AbstractPlayMenu.__init__(value=value, rng=rng, callback=callback)
_Widget.__init__(self)
- QVBoxLayout.__init__(self)
+ with _disabled_init(_AbstractPlayMenu):
+ QVBoxLayout.__init__(self)
self._slider = QSlider(Qt.Horizontal)
self._slider.setMinimum(rng[0])
self._slider.setMaximum(rng[1])
@@ -540,7 +548,8 @@ def __init__(
window=window,
)
_Widget.__init__(self)
- QMessageBox.__init__(self, parent=window)
+ with _disabled_init(_AbstractPopup):
+ QMessageBox.__init__(self, parent=window)
self.setWindowTitle(title)
self.setText(text)
# icon is one of _Dialog.supported_icon_names
@@ -693,9 +702,22 @@ def _set_size(self, width=None, height=None):
# https://github.com/mne-tools/mne-python/issues/9182
+# This is necessary to make PySide6 happy -- something weird with the
+# __init__ calling causes the _AbstractXYZ class __init__ to be called twice
+@contextmanager
+def _disabled_init(klass):
+ orig = klass.__init__
+ klass.__init__ = lambda *args, **kwargs: None
+ try:
+ yield
+ finally:
+ klass.__init__ = orig
+
+
class _MNEMainWindow(MainWindow):
def __init__(self, parent=None, title=None, size=None):
- MainWindow.__init__(self, parent=parent, title=title, size=size)
+ with _disabled_init(_Widget):
+ MainWindow.__init__(self, parent=parent, title=title, size=size)
self.setAttribute(Qt.WA_ShowWithoutActivating, True)
self.setAttribute(Qt.WA_DeleteOnClose, True)
diff --git a/mne/viz/evoked.py b/mne/viz/evoked.py
index 70c7ef7512a..19a5dbda199 100644
--- a/mne/viz/evoked.py
+++ b/mne/viz/evoked.py
@@ -861,9 +861,9 @@ def _add_nave(ax, nave):
if nave is not None:
ax.annotate(
r"N$_{\mathrm{ave}}$=%d" % nave,
- ha="left",
+ ha="right",
va="bottom",
- xy=(0, 1),
+ xy=(1, 1),
xycoords="axes fraction",
xytext=(0, 5),
textcoords="offset pixels",
diff --git a/mne/viz/utils.py b/mne/viz/utils.py
index bdf51d8202d..e9802e872d2 100644
--- a/mne/viz/utils.py
+++ b/mne/viz/utils.py
@@ -1017,6 +1017,8 @@ def plot_sensors(
sphere=None,
pointsize=None,
linewidth=2,
+ *,
+ cmap=None,
verbose=None,
):
"""Plot sensors positions.
@@ -1074,6 +1076,10 @@ def plot_sensors(
``kind='3d'``, or ``25`` otherwise.
linewidth : float
The width of the outline. If ``0``, the outline will not be drawn.
+ cmap : str | instance of matplotlib.colors.Colormap | None
+ Colormap for coloring ch_groups. Has effect only when ``ch_groups``
+        is a list of lists. If None, set to ``matplotlib.rcParams["image.cmap"]``.
+ Defaults to None.
%(verbose)s
Returns
@@ -1197,10 +1203,9 @@ def plot_sensors(
color = np.mean(_rgb(x, y, z), axis=0)
color_vals[idx, :3] = color # mean of spatial color
else: # array-like
- import matplotlib.pyplot as plt
-
+ cmap = _get_cmap(cmap)
colors = np.linspace(0, 1, len(ch_groups))
- color_vals = [plt.cm.jet(colors[i]) for i in range(len(ch_groups))]
+ color_vals = [cmap(colors[i]) for i in range(len(ch_groups))]
colors = np.zeros((len(picks), 4))
for pick_idx, pick in enumerate(picks):
for ind, value in enumerate(ch_groups):
diff --git a/requirements.txt b/requirements.txt
index 6baf9465326..68c2553b87d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,7 +9,7 @@ h5io
packaging
pymatreader
qtpy
-PySide6!=6.3.0,!=6.4.0,!=6.4.0.1,!=6.5.0 # incompat with Matplotlib 3.6.1 and qtpy
+PySide6!=6.5.1
pyobjc-framework-Cocoa>=5.2.0; platform_system=="Darwin"
sip
scikit-learn
diff --git a/tutorials/preprocessing/90_eyetracking_data.py b/tutorials/preprocessing/90_eyetracking_data.py
index 3c3a9d84b09..07b6846f768 100644
--- a/tutorials/preprocessing/90_eyetracking_data.py
+++ b/tutorials/preprocessing/90_eyetracking_data.py
@@ -36,12 +36,58 @@
from mne import Epochs, find_events
from mne.io import read_raw_eyelink
from mne.datasets.eyelink import data_path
+from mne.preprocessing.eyetracking import read_eyelink_calibration
eyelink_fname = data_path() / "mono_multi-block_multi-DINS.asc"
raw = read_raw_eyelink(eyelink_fname, create_annotations=["blinks", "messages"])
raw.crop(tmin=0, tmax=146)
+# %%
+# Checking the calibration
+# ------------------------
+#
+# We can also load the calibrations from the recording and visualize them.
+# Checking the quality of the calibration is a useful first step in assessing
+# the quality of the eye tracking data. Note that
+# :func:`~mne.preprocessing.eyetracking.read_eyelink_calibration`
+# will return a list of :class:`~mne.preprocessing.eyetracking.Calibration` instances,
+# one for each calibration. We can index that list to access a specific calibration.
+
+cals = read_eyelink_calibration(eyelink_fname)
+print(f"number of calibrations: {len(cals)}")
+first_cal = cals[0] # let's access the first (and only in this case) calibration
+print(first_cal)
+
+# %%
+# Here we can see that a 5-point calibration was performed at the beginning of
+# the recording. Note that you can access the calibration information using
+# dictionary style indexing:
+
+print(f"Eye calibrated: {first_cal['eye']}")
+print(f"Calibration model: {first_cal['model']}")
+print(f"Calibration average error: {first_cal['avg_error']}")
+
+# %%
+# The data for individual calibration points are stored as :class:`numpy.ndarray`
+# arrays, in the ``'positions'``, ``'gaze'``, and ``'offsets'`` keys. ``'positions'``
+# contains the x and y coordinates of each calibration point. ``'gaze'`` contains the
+# x and y coordinates of the actual gaze position for each calibration point.
+# ``'offsets'`` contains the offset (in visual degrees) between the calibration position
+# and the actual gaze position for each calibration point. Below is an example of
+# how to access these data:
+print(f"offset of the first calibration point: {first_cal['offsets'][0]}")
+print(f"offset for each calibration point: {first_cal['offsets']}")
+print(f"x-coordinate for each calibration point: {first_cal['positions'].T[0]}")
+
+# %%
+# Let's plot the calibration to get a better look. Below we see the location at which each
+# calibration point was displayed (gray dots), the positions of the actual gaze (red),
+# and the offsets (in visual degrees) between the calibration position and the actual
+# gaze position of each calibration point.
+
+first_cal.plot(show_offsets=True)
+
# %%
# Get stimulus events from DIN channel
# ------------------------------------
@@ -70,7 +116,8 @@
# categorized as blinks). Also, notice that we have passed a custom `dict` into
# the scalings argument of ``raw.plot``. This is necessary to make the eyegaze
# channel traces legible when plotting, since the file contains pixel position
-# data (as opposed to eye angles, which are reported in radians).
+# data (as opposed to eye angles, which are reported in radians). We also could
+# have simply passed ``scalings='auto'``.
raw.plot(
events=events,
@@ -102,7 +149,7 @@
# It is important to note that pupil size data are reported by Eyelink (and
# stored internally by MNE) as arbitrary units (AU). While it often can be
# preferable to convert pupil size data to millimeters, this requires
-# information that is not always present in the file. MNE does not currently
+# information that is not present in the file. MNE does not currently
# provide methods to convert pupil size data.
# See :ref:`tut-importing-eyetracking-data` for more information on pupil size
# data.