diff --git a/docs/source/bibliography.bib b/docs/source/bibliography.bib
index 46f3c7309..0d9bfc901 100644
--- a/docs/source/bibliography.bib
+++ b/docs/source/bibliography.bib
@@ -1,3 +1,22 @@
+@article{CodeComprehension,
+author = {Alakmeh, Tarek and Reich, David and J\"{a}ger, Lena and Fritz, Thomas},
+title = {Predicting Code Comprehension: A Novel Approach to Align Human Gaze with Code using Deep Neural Networks},
+year = {2024},
+issue_date = {July 2024},
+publisher = {Association for Computing Machinery},
+address = {New York, NY, USA},
+volume = {1},
+number = {FSE},
+url = {https://doi.org/10.1145/3660795},
+doi = {10.1145/3660795},
+abstract = {The better the code quality and the less complex the code, the easier it is for software developers to comprehend and evolve it. Yet, how do we best detect quality concerns in the code? Existing measures to assess code quality, such as McCabe’s cyclomatic complexity, are decades old and neglect the human aspect. Research has shown that considering how a developer reads and experiences the code can be an indicator of its quality. In our research, we built on these insights and designed, trained, and evaluated the first deep neural network that aligns a developer's eye gaze with the code tokens the developer looks at to predict code comprehension and perceived difficulty. To train and analyze our approach, we performed an experiment in which 27 participants worked on a range of 16 short code comprehension tasks while we collected fine-grained gaze data using an eye tracker. The results of our evaluation show that our deep neural sequence model that integrates both the human gaze and the stimulus code, can predict (a) code comprehension and (b) the perceived code difficulty significantly better than current state-of-the-art reference methods. We also show that aligning human gaze with code leads to better performance than models that rely solely on either code or human gaze. We discuss potential applications and propose future work to build better human-inclusive code evaluation systems.},
+journal = {Proc. ACM Softw. Eng.},
+month = {jul},
+articleno = {88},
+numpages = {23},
+keywords = {code comprehension, code-fixation attention, eye-tracking, lab experiment, neural networks}
+}
+
 @inproceedings{CopCoL1Hollenstein,
     title = "The Copenhagen Corpus of Eye Tracking Recordings from Natural Reading of {D}anish Texts",
     author = {Hollenstein, Nora and
diff --git a/src/pymovements/datasets/__init__.py b/src/pymovements/datasets/__init__.py
index d68e6c1bd..81e45c14c 100644
--- a/src/pymovements/datasets/__init__.py
+++ b/src/pymovements/datasets/__init__.py
@@ -25,6 +25,7 @@
     :toctree:
     :template: class.rst
 
+    pymovements.datasets.CodeComprehension
     pymovements.datasets.CopCo
     pymovements.datasets.DIDEC
     pymovements.datasets.EMTeC
@@ -47,6 +48,7 @@
     pymovements.datasets.ToyDataset
     pymovements.datasets.ToyDatasetEyeLink
 """
+from pymovements.datasets.codecomprehension import CodeComprehension
 from pymovements.datasets.copco import CopCo
 from pymovements.datasets.didec import DIDEC
 from pymovements.datasets.emtec import EMTeC
@@ -64,6 +66,7 @@
 
 
 __all__ = [
+    'CodeComprehension',
     'CopCo',
     'DIDEC',
     'EMTeC',
diff --git a/src/pymovements/datasets/codecomprehension.py b/src/pymovements/datasets/codecomprehension.py
new file mode 100644
index 000000000..c46748643
--- /dev/null
+++ b/src/pymovements/datasets/codecomprehension.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2022-2024 The pymovements Project Authors
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+"""Provides a definition for the CodeComprehension dataset."""
+from __future__ import annotations
+
+from dataclasses import dataclass
+from dataclasses import field
+from typing import Any
+
+import polars as pl
+
+from pymovements.dataset.dataset_definition import DatasetDefinition
+from pymovements.dataset.dataset_library import register_dataset
+from pymovements.gaze.experiment import Experiment
+
+
+@dataclass
+@register_dataset
+class CodeComprehension(DatasetDefinition):
+    """CodeComprehension dataset :cite:p:`CodeComprehension`.
+
+    This dataset includes eye-tracking-while-code-reading data from participants, each recorded
+    in a single session. Eye movements are recorded at a sampling frequency of 1,000 Hz using an
+    EyeLink 1000 eye tracker and are provided as pixel coordinates.
+
+    Participants are instructed to read short code snippets and answer a comprehension question
+    for each snippet.
+
+    Attributes
+    ----------
+    name: str
+        The name of the dataset.
+
+    has_files: dict[str, bool]
+        Indicate whether the dataset contains 'gaze', 'precomputed_events', and
+        'precomputed_reading_measures'.
+
+    mirrors: dict[str, tuple[str, ...]]
+        A tuple of mirrors of the dataset. Each entry must be of type `str` and end with a '/'.
+
+    resources: dict[str, tuple[dict[str, str], ...]]
+        A tuple of dataset resources. Each tuple entry must be a dictionary with the following
+        keys:
+        - `resource`: The url suffix of the resource. This will be concatenated with the mirror.
+        - `filename`: The filename under which the file is saved.
+        - `md5`: The MD5 checksum of the respective file.
+
+    extract: dict[str, bool]
+        Decide whether to extract the data.
+
+    experiment: Experiment
+        The experiment definition.
+
+    filename_format: dict[str, str]
+        Regular expression which will be matched before trying to load the file. Named groups will
+        appear in the `fileinfo` dataframe.
+
+    filename_format_schema_overrides: dict[str, dict[str, type]]
+        If named groups are present in the `filename_format`, this makes it possible to cast
+        specific named groups to a particular datatype.
+
+    trial_columns: list[str]
+        The name of the trial columns in the input data frame. If the list is empty or None,
+        the input data frame is assumed to contain only one trial. If the list is not empty,
+        the input data frame is assumed to contain multiple trials and the transformation
+        methods will be applied to each trial separately.
+
+    time_column: str
+        The name of the timestamp column in the input data frame. This column will be renamed to
+        ``time``.
+
+    time_unit: str
+        The unit of the timestamps in the timestamp column in the input data frame. Supported
+        units are 's' for seconds, 'ms' for milliseconds and 'step' for steps. If the unit is
+        'step' the experiment definition must be specified. All timestamps will be converted to
+        milliseconds.
+
+    pixel_columns: list[str]
+        The name of the pixel position columns in the input data frame. These columns will be
+        nested into the column ``pixel``. If the list is empty or None, the nested ``pixel``
+        column will not be created.
+
+    column_map: dict[str, str]
+        The keys are the columns to read, the values are the names to which they should be renamed.
+
+    custom_read_kwargs: dict[str, dict[str, Any]]
+        If specified, these keyword arguments will be passed to the file reading function.
+
+    Examples
+    --------
+    Initialize your :py:class:`~pymovements.Dataset` object with the
+    :py:class:`~pymovements.CodeComprehension` definition:
+
+    >>> import pymovements as pm
+    >>>
+    >>> dataset = pm.Dataset("CodeComprehension", path='data/CodeComprehension')
+
+    Download the dataset resources:
+
+    >>> dataset.download()  # doctest: +SKIP
+
+    Load the data into memory:
+
+    >>> dataset.load()  # doctest: +SKIP
+    """
+
+    # pylint: disable=similarities
+    # The DatasetDefinition child classes potentially share code chunks for definitions.
+
+    name: str = 'CodeComprehension'
+
+    has_files: dict[str, bool] = field(
+        default_factory=lambda: {
+            'gaze': False,
+            'precomputed_events': True,
+            'precomputed_reading_measures': False,
+        },
+    )
+
+    mirrors: dict[str, tuple[str, ...]] = field(
+        default_factory=lambda: {
+            'precomputed_events': ('https://zenodo.org/',),
+        },
+    )
+
+    resources: dict[str, tuple[dict[str, str], ...]] = field(
+        default_factory=lambda: {
+            'precomputed_events': (
+                {
+                    'resource':
+                        'records/11123101/files/Predicting%20Code%20Comprehension%20Package'
+                        '.zip?download=1',
+                    'filename': 'data.zip',
+                    'md5': '3a3c6fb96550bc2c2ddcf5d458fb12a2',
+                },
+            ),
+        },
+    )
+
+    extract: dict[str, bool] = field(default_factory=lambda: {'precomputed_events': True})
+
+    experiment: Experiment = Experiment(
+        screen_width_px=None,
+        screen_height_px=None,
+        screen_width_cm=None,
+        screen_height_cm=None,
+        distance_cm=None,
+        origin=None,
+        sampling_rate=2000,
+    )
+
+    filename_format: dict[str, str] = field(
+        default_factory=lambda: {
+            'precomputed_events': r'fix_report_P{subject_id:s}.txt',
+        },
+    )
+
+    filename_format_schema_overrides: dict[str, dict[str, type]] = field(
+        default_factory=lambda: {
+            'precomputed_events': {'subject_id': pl.Utf8},
+        },
+    )
+
+    trial_columns: list[str] = field(default_factory=lambda: [])
+
+    time_column: str = ''
+
+    time_unit: str = ''
+
+    pixel_columns: list[str] = field(default_factory=lambda: [])
+
+    column_map: dict[str, str] = field(default_factory=lambda: {})
+
+    custom_read_kwargs: dict[str, dict[str, Any]] = field(
+        default_factory=lambda: {
+            'precomputed_events': {
+                'separator': '\t',
+                'null_values': '.',
+                'quote_char': '"',
+            },
+        },
+    )
diff --git a/tests/unit/datasets/datasets_test.py b/tests/unit/datasets/datasets_test.py
index c6aecffa8..6a3e02d58 100644
--- a/tests/unit/datasets/datasets_test.py
+++ b/tests/unit/datasets/datasets_test.py
@@ -31,6 +31,7 @@
     ('public_dataset', 'dataset_name'),
     # XXX: add public dataset in alphabetical order
     [
+        pytest.param(pm.datasets.CodeComprehension, 'CodeComprehension', id='CodeComprehension'),
         pytest.param(pm.datasets.CopCo, 'CopCo', id='CopCo'),
         pytest.param(pm.datasets.DIDEC, 'DIDEC', id='DIDEC'),
         pytest.param(pm.datasets.EMTeC, 'EMTeC', id='EMTeC'),
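
Note (not part of the patch): the `filename_format` and `filename_format_schema_overrides` fields above describe how fixation-report filenames are mapped onto `fileinfo` columns. The following is a minimal, hypothetical sketch of that idea using only `re` and `polars`, deliberately not the pymovements API; the regex translation of `fix_report_P{subject_id:s}.txt` and the example filenames are assumptions for illustration.

# Hypothetical illustration, not pymovements code: translate the curly-brace
# filename_format into a regex with a named group and build a small fileinfo
# table, pinning 'subject_id' to a string dtype via a schema override.
import re

import polars as pl

# Assumed regex equivalent of 'fix_report_P{subject_id:s}.txt'.
pattern = re.compile(r'fix_report_P(?P<subject_id>.+)\.txt')

# Example filenames, made up for illustration.
filenames = ['fix_report_P01.txt', 'fix_report_P12.txt']
rows = [match.groupdict() for name in filenames if (match := pattern.fullmatch(name))]

# The override keeps subject_id as pl.Utf8, so identifiers like '01' retain their leading zero.
fileinfo = pl.DataFrame(rows, schema_overrides={'subject_id': pl.Utf8})
print(fileinfo)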