diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 9fe68ac4c4..6dac9fed16 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,7 +13,7 @@ exclude: |
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.4.0
+    rev: v4.6.0
     hooks:
       - id: check-added-large-files
       - id: check-ast
@@ -24,16 +24,16 @@ repos:
       - id: trailing-whitespace
         args: [--markdown-linebreak-ext=md]
   - repo: https://github.com/adrienverge/yamllint
-    rev: 'v1.31.0'
+    rev: 'v1.35.1'
     hooks:
       - id: yamllint
   - repo: https://github.com/codespell-project/codespell
-    rev: 'v2.2.4'
+    rev: 'v2.3.0'
     hooks:
       - id: codespell
         additional_dependencies: [tomli] # required for Python 3.10
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: "v0.4.10"
+    rev: "v0.6.8"
     hooks:
       - id: ruff
         args: [--fix]
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 76efeb2eca..26e110735f 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -6,7 +6,7 @@ In the interest of fostering an open and welcoming environment, we as
 contributors and maintainers pledge to making participation in our project and
 our community a harassment-free experience for everyone, regardless of age, body
 size, disability, ethnicity, sex characteristics, gender identity and expression,
-level of experience, education, socio-economic status, nationality, personal
+level of experience, education, socioeconomic status, nationality, personal
 appearance, race, religion, or sexual identity and orientation.
 
 ## Our Standards
diff --git a/doc/changelog.rst b/doc/changelog.rst
index ad104a7b7d..68c4fe1792 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -1024,7 +1024,7 @@ Highlights
 ~~~~~~~~~~
 
 - ESMValCore now has the ability to automatically download missing data from ESGF. For details, see :ref:`Data Retrieval`.
-- ESMValCore now also can resume an earlier run. This is useful to re-use expensive preprocessor results. For details, see :ref:`Running`.
+- ESMValCore now also can resume an earlier run. This is useful to reuse expensive preprocessor results. For details, see :ref:`Running`.
 
 This release includes
diff --git a/doc/contributing.rst b/doc/contributing.rst
index 4188ebeb04..ee47974e90 100644
--- a/doc/contributing.rst
+++ b/doc/contributing.rst
@@ -101,7 +101,7 @@ Please keep the following considerations in mind when programming:
   code.
 - If you find yourself copy-pasting a piece of code and making minor changes
   to every copy, instead put the repeated bit of code in a function that you can
-  re-use, and provide the changed bits as function arguments.
+  reuse, and provide the changed bits as function arguments.
 - Be careful when changing existing unit tests to make your new feature work.
   You might be breaking existing features if you have to change existing tests.
diff --git a/doc/quickstart/find_data.rst b/doc/quickstart/find_data.rst
index 0765a7a9cd..e9077884f2 100644
--- a/doc/quickstart/find_data.rst
+++ b/doc/quickstart/find_data.rst
@@ -602,7 +602,7 @@ Key                   Description                               Default value if not
                                                                 recipe if default DRS is used)
 ```special_attr``     A special attribute in the filename       No default
                       `ACCESS-ESM` raw data, it's related to
-                      frquency of raw data
+                      frequency of raw data
 ``sub_dataset``       Part of the ACCESS-ESM raw dataset        No default
                       root, need to specify if you want
                       to use the cmoriser
diff --git a/doc/quickstart/run.rst b/doc/quickstart/run.rst
index 5eca15e714..fec474f290 100644
--- a/doc/quickstart/run.rst
+++ b/doc/quickstart/run.rst
@@ -93,14 +93,14 @@ Possible values are:
 - `default`: fail if there are any errors.
 - `strict`: fail if there are any warnings.
 
-To re-use pre-processed files from a previous run of the same recipe, you can
+To reuse pre-processed files from a previous run of the same recipe, you can
 use
 
 .. code:: bash
 
     esmvaltool run recipe_example.yml --resume_from ~/esmvaltool_output/recipe_python_20210930_123907
 
-Multiple directories can be specified for re-use, make sure to quote them:
+Multiple directories can be specified for reuse, make sure to quote them:
 
 .. code:: bash
 
diff --git a/esmvalcore/cmor/_fixes/cmip6/mcm_ua_1_0.py b/esmvalcore/cmor/_fixes/cmip6/mcm_ua_1_0.py
index 626cb4f608..7484449fcc 100644
--- a/esmvalcore/cmor/_fixes/cmip6/mcm_ua_1_0.py
+++ b/esmvalcore/cmor/_fixes/cmip6/mcm_ua_1_0.py
@@ -26,7 +26,7 @@ class AllVars(Fix):
     def fix_metadata(self, cubes):
         """Fix metadata.
 
-        Remove unnecessary spaces in metadat and rename ``var_name`` of
+        Remove unnecessary spaces in metadata and rename ``var_name`` of
         latitude and longitude and fix longitude boundary description may
         be wrong (lons=[0, ..., 356.25]; on_bnds=[[-1.875, 1.875], ...,
         [354.375, 360]]).
diff --git a/notebooks/composing-recipes.ipynb b/notebooks/composing-recipes.ipynb
index bb7259c31b..5156618d88 100644
--- a/notebooks/composing-recipes.ipynb
+++ b/notebooks/composing-recipes.ipynb
@@ -17,9 +17,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "import yaml\n",
+    "\n",
     "from esmvalcore.config import CFG\n",
-    "from esmvalcore.dataset import Dataset, datasets_to_recipe\n",
-    "import yaml"
+    "from esmvalcore.dataset import Dataset, datasets_to_recipe"
    ]
   },
   {
@@ -38,7 +39,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "CFG['search_esgf'] = 'always'"
+    "CFG[\"search_esgf\"] = \"always\""
    ]
   },
   {
@@ -84,18 +85,18 @@
    ],
    "source": [
     "tas = Dataset(\n",
-    "    short_name='tas',\n",
-    "    mip='Amon',\n",
-    "    project='CMIP6',\n",
-    "    dataset='CanESM5-1',\n",
-    "    ensemble='r1i1p1f1',\n",
-    "    exp='historical',\n",
-    "    grid='gn',\n",
-    "    timerange='2000/2002',\n",
+    "    short_name=\"tas\",\n",
+    "    mip=\"Amon\",\n",
+    "    project=\"CMIP6\",\n",
+    "    dataset=\"CanESM5-1\",\n",
+    "    ensemble=\"r1i1p1f1\",\n",
+    "    exp=\"historical\",\n",
+    "    grid=\"gn\",\n",
+    "    timerange=\"2000/2002\",\n",
     ")\n",
-    "tas['diagnostic'] = 'diagnostic_name'\n",
+    "tas[\"diagnostic\"] = \"diagnostic_name\"\n",
     "\n",
-    "pr = tas.copy(short_name='pr')\n",
+    "pr = tas.copy(short_name=\"pr\")\n",
     "\n",
     "print(yaml.safe_dump(datasets_to_recipe([tas, pr])))"
    ]
@@ -127,14 +128,14 @@
    ],
    "source": [
     "dataset_template = Dataset(\n",
-    "    short_name='tas',\n",
-    "    mip='Amon',\n",
-    "    project='CMIP6',\n",
-    "    exp='historical',\n",
-    "    dataset='*',\n",
-    "    institute='*',\n",
-    "    ensemble='*',\n",
-    "    grid='*',\n",
+    "    short_name=\"tas\",\n",
+    "    mip=\"Amon\",\n",
+    "    project=\"CMIP6\",\n",
+    "    exp=\"historical\",\n",
+    "    dataset=\"*\",\n",
+    "    institute=\"*\",\n",
+    "    ensemble=\"*\",\n",
+    "    grid=\"*\",\n",
     ")\n",
     "datasets = list(dataset_template.from_files())\n",
     "len(datasets)"
@@ -584,7 +585,7 @@
    ],
    "source": [
     "for dataset in datasets:\n",
-    "    dataset.facets['diagnostic'] = 'diagnostic_name'\n",
+    "    dataset.facets[\"diagnostic\"] = \"diagnostic_name\"\n",
     "print(yaml.safe_dump(datasets_to_recipe(datasets)))"
    ]
  }
diff --git a/notebooks/discovering-data.ipynb b/notebooks/discovering-data.ipynb
index 923c915ac7..d6c9001ef2 100644
--- a/notebooks/discovering-data.ipynb
+++ b/notebooks/discovering-data.ipynb
@@ -19,9 +19,8 @@
    "outputs": [],
    "source": [
     "from esmvalcore.config import CFG\n",
-    "from esmvalcore.dataset import Dataset, datasets_to_recipe\n",
-    "from esmvalcore.esgf import download\n",
-    "import yaml"
+    "from esmvalcore.dataset import Dataset\n",
+    "from esmvalcore.esgf import download"
    ]
   },
   {
@@ -40,7 +39,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "CFG['search_esgf'] = 'always'"
+    "CFG[\"search_esgf\"] = \"always\""
    ]
   },
   {
@@ -60,14 +59,14 @@
    "outputs": [],
    "source": [
     "dataset_template = Dataset(\n",
-    "    short_name='tas',\n",
-    "    mip='Amon',\n",
-    "    project='CMIP6',\n",
-    "    exp='historical',\n",
-    "    dataset='*',\n",
-    "    institute='*',\n",
-    "    ensemble='*',\n",
-    "    grid='*',\n",
+    "    short_name=\"tas\",\n",
+    "    mip=\"Amon\",\n",
+    "    project=\"CMIP6\",\n",
+    "    exp=\"historical\",\n",
+    "    dataset=\"*\",\n",
+    "    institute=\"*\",\n",
+    "    ensemble=\"*\",\n",
+    "    grid=\"*\",\n",
     ")"
    ]
   },
@@ -292,7 +291,7 @@
     }
    ],
    "source": [
-    "dataset.files[0].download(CFG['download_dir'])"
+    "dataset.files[0].download(CFG[\"download_dir\"])"
    ]
   },
   {
@@ -311,7 +310,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "download(dataset.files, CFG['download_dir'])"
+    "download(dataset.files, CFG[\"download_dir\"])"
    ]
  }
 ],
diff --git a/notebooks/loading-and-processing-data.ipynb b/notebooks/loading-and-processing-data.ipynb
index 1487f3f10c..bb85566430 100644
--- a/notebooks/loading-and-processing-data.ipynb
+++ b/notebooks/loading-and-processing-data.ipynb
@@ -19,13 +19,13 @@
    "source": [
     "%matplotlib inline\n",
     "\n",
-    "import matplotlib.pyplot as plt\n",
     "import iris.quickplot\n",
+    "import matplotlib.pyplot as plt\n",
     "\n",
     "from esmvalcore.config import CFG\n",
     "from esmvalcore.dataset import Dataset\n",
-    "from esmvalcore.esgf import download, ESGFFile\n",
-    "from esmvalcore.preprocessor import area_statistics, annual_statistics"
+    "from esmvalcore.esgf import ESGFFile, download\n",
+    "from esmvalcore.preprocessor import annual_statistics, area_statistics"
    ]
   },
   {
@@ -43,7 +43,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "CFG['search_esgf'] = 'when_missing'"
+    "CFG[\"search_esgf\"] = \"when_missing\""
    ]
   },
   {
@@ -80,13 +80,13 @@
    ],
    "source": [
     "tas = Dataset(\n",
-    "    short_name='tas',\n",
-    "    mip='Amon',\n",
-    "    project='CMIP5',\n",
-    "    dataset='MPI-ESM-MR',\n",
-    "    ensemble='r1i1p1',\n",
-    "    exp='historical',\n",
-    "    timerange='1850/2000',\n",
+    "    short_name=\"tas\",\n",
+    "    mip=\"Amon\",\n",
+    "    project=\"CMIP5\",\n",
+    "    dataset=\"MPI-ESM-MR\",\n",
+    "    ensemble=\"r1i1p1\",\n",
+    "    exp=\"historical\",\n",
+    "    timerange=\"1850/2000\",\n",
     ")\n",
     "tas"
    ]
@@ -124,7 +124,7 @@
     }
    ],
    "source": [
-    "tas.add_supplementary(short_name='areacella', mip='fx', ensemble='r0i0p0')\n",
+    "tas.add_supplementary(short_name=\"areacella\", mip=\"fx\", ensemble=\"r0i0p0\")\n",
     "tas.supplementaries"
    ]
   },
@@ -247,7 +247,7 @@
     "for supplementary_ds in tas.supplementaries:\n",
     "    files.extend(supplementary_ds.files)\n",
     "files = [file for file in files if isinstance(file, ESGFFile)]\n",
-    "download(files, CFG['download_dir'])\n",
+    "download(files, CFG[\"download_dir\"])\n",
     "tas.find_files()\n",
     "print(tas.files)\n",
     "for supplementary_ds in tas.supplementaries:\n",
@@ -548,9 +548,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "cube = area_statistics(cube, operator='mean')\n",
-    "cube = annual_statistics(cube, operator='mean')\n",
-    "cube.convert_units('degrees_C')"
+    "cube = area_statistics(cube, operator=\"mean\")\n",
+    "cube = annual_statistics(cube, operator=\"mean\")\n",
+    "cube.convert_units(\"degrees_C\")"
    ]
   },
   {
diff --git a/pyproject.toml b/pyproject.toml
index 044d95fa64..5a45ca2ab9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -7,7 +7,7 @@ version_scheme = "release-branch-semver"
 
 [tool.codespell]
 skip = "*.ipynb,esmvalcore/config/extra_facets/ipslcm-mappings.yml"
-ignore-words-list = "vas,hist"
+ignore-words-list = "vas,hist,oce"
 
 [tool.pylint.main]
 jobs = 1 # Running more than one job in parallel crashes prospector.
diff --git a/tests/unit/preprocessor/_volume/test_volume.py b/tests/unit/preprocessor/_volume/test_volume.py
index 69f0377c45..682d42fd45 100644
--- a/tests/unit/preprocessor/_volume/test_volume.py
+++ b/tests/unit/preprocessor/_volume/test_volume.py
@@ -657,46 +657,42 @@ def test_volume_statistics_invalid_bounds(self):
         with self.assertRaises(ValueError) as err:
             volume_statistics(self.grid_invalid_z_bounds, "mean")
-        self.assertIn(
+        assert (
             "Z axis bounds shape found (3, 2, 2, 4). Bounds should be "
-            "2 in the last dimension to compute the thickness.",
-            str(err.exception),
-        )
+            "2 in the last dimension to compute the thickness."
+        ) in str(err.exception)
 
     def test_volume_statistics_invalid_units(self):
         """Test z-axis units cannot be converted to m"""
         with self.assertRaises(ValueError) as err:
             volume_statistics(self.grid_4d_sigma_space, "mean")
-        self.assertIn(
+        assert (
             "Cannot compute volume using the Z-axis. "
-            "Unable to convert from 'Unit('kg m-3')' to 'Unit('m')'.",
-            str(err.exception),
-        )
+            "Unable to convert from 'Unit('kg m-3')' to 'Unit('m')'."
+        ) in str(err.exception)
 
     def test_volume_statistics_z_axis_time_error(self):
         # Fails because depth z-axis coord depends on time dimensions
         # which would aggregate also along that dimension
         with self.assertRaises(ValueError) as err:
             volume_statistics(self.grid_4d_z, "mean")
-        self.assertIn(
+        assert (
             "X and Y axis coordinates depend on (2, 3) dimensions, "
             "while X, Y, and Z axis depends on (0, 1, 2, 3) dimensions. "
             "This may indicate Z axis depending on other dimension than "
-            "space that could provoke invalid aggregation...",
-            str(err.exception),
-        )
+            "space that could provoke invalid aggregation..."
+        ) in str(err.exception)
 
         grid_3d_no_x = self.grid_4d_z[..., 0]
         with self.assertRaises(ValueError) as err:
             volume_statistics(grid_3d_no_x, "mean")
-        self.assertIn(
+        assert (
             "X and Y axis coordinates depend on (2,) dimensions, "
             "while X, Y, and Z axis depends on (0, 1, 2) dimensions. "
             "This may indicate Z axis depending on other dimension than "
-            "space that could provoke invalid aggregation...",
-            str(err.exception),
-        )
+            "space that could provoke invalid aggregation..."
+        ) in str(err.exception)
 
     def test_volume_statistics_missing_axis(self):
         # x axis is missing
@@ -711,9 +707,7 @@ def test_volume_statistics_missing_axis(self):
         grid_no_z = self.grid_4d[:, 0]
         with self.assertRaises(ValueError) as err:
             volume_statistics(grid_no_z, "mean")
-        self.assertIn(
-            "Cannot compute volume with scalar Z-axis", str(err.exception)
-        )
+        assert "Cannot compute volume with scalar Z-axis" in str(err.exception)
 
     def test_volume_statistics_2d_depth(self):
         # Create new 2D depth coord
diff --git a/tests/unit/task/test_print.py b/tests/unit/task/test_print.py
index c942f54169..0137486877 100644
--- a/tests/unit/task/test_print.py
+++ b/tests/unit/task/test_print.py
@@ -108,7 +108,7 @@ def test_repr_simple_tree(preproc_task, diagnostic_task):
 
 
 def test_repr_full_tree(preproc_task, diagnostic_task):
-    """Test a more comlicated task tree."""
+    """Test a more complicated task tree."""
     derive_input_task_1 = copy.deepcopy(preproc_task)
     derive_input_task_1.name = "diag_1/tas_derive_input_1"
diff --git a/tests/unit/test_dataset.py b/tests/unit/test_dataset.py
index 317a00d1e9..66d23306ec 100644
--- a/tests/unit/test_dataset.py
+++ b/tests/unit/test_dataset.py
@@ -708,7 +708,7 @@ def test_from_files(session, monkeypatch):
 
 def test_from_files_with_supplementary(session, monkeypatch):
     rootpath = Path("/path/to/data")
-    file = esmvalcore.local.LocalFile(
+    file1 = esmvalcore.local.LocalFile(
         rootpath,
         "CMIP6",
         "CMIP",
@@ -722,7 +722,7 @@ def test_from_files_with_supplementary(session, monkeypatch):
         "v20190827",
         "tas_Amon_FGOALS-g3_historical_r3i1p1f1_gn_199001-199912.nc",
     )
-    file.facets = {
+    file1.facets = {
         "activity": "CMIP",
         "institute": "CAS",
         "dataset": "FGOALS-g3",
@@ -733,7 +733,7 @@ def test_from_files_with_supplementary(session, monkeypatch):
         "grid": "gn",
         "version": "v20190827",
     }
-    afile = esmvalcore.local.LocalFile(
+    file2 = esmvalcore.local.LocalFile(
         rootpath,
         "CMIP6",
         "CMIP",
@@ -747,7 +747,7 @@ def test_from_files_with_supplementary(session, monkeypatch):
         "v20210615",
"areacella_fx_FGOALS-g3_historical_r1i1p1f1_gn.nc", ) - afile.facets = { + file2.facets = { "activity": "CMIP", "institute": "CAS", "dataset": "FGOALS-g3", @@ -758,7 +758,7 @@ def test_from_files_with_supplementary(session, monkeypatch): "grid": "gn", "version": "v20210615", } - monkeypatch.setattr(Dataset, "find_files", mock_find_files(file, afile)) + monkeypatch.setattr(Dataset, "find_files", mock_find_files(file1, file2)) dataset = Dataset( short_name="tas", @@ -796,7 +796,7 @@ def test_from_files_with_supplementary(session, monkeypatch): def test_from_files_with_globs(monkeypatch, session): """Test `from_files` with wildcards in dataset and supplementary.""" rootpath = Path("/path/to/data") - file = esmvalcore.local.LocalFile( + file1 = esmvalcore.local.LocalFile( rootpath, "CMIP6", "CMIP", @@ -810,7 +810,7 @@ def test_from_files_with_globs(monkeypatch, session): "v20181126", "tas_Amon_BCC-CSM2-MR_historical_r1i1p1f1_gn_185001-201412.nc", ) - file.facets = { + file1.facets = { "activity": "CMIP", "dataset": "BCC-CSM2-MR", "exp": "historical", @@ -822,7 +822,7 @@ def test_from_files_with_globs(monkeypatch, session): "short_name": "tas", "version": "v20181126", } - afile = esmvalcore.local.LocalFile( + file2 = esmvalcore.local.LocalFile( rootpath, "CMIP6", "GMMIP", @@ -836,7 +836,7 @@ def test_from_files_with_globs(monkeypatch, session): "v20190613", "areacella_fx_BCC-CSM2-MR_hist-resIPO_r1i1p1f1_gn.nc", ) - afile.facets = { + file2.facets = { "activity": "GMMIP", "dataset": "BCC-CSM2-MR", "ensemble": "r1i1p1f1", @@ -869,7 +869,7 @@ def test_from_files_with_globs(monkeypatch, session): dataset.session = session print(dataset) - monkeypatch.setattr(Dataset, "find_files", mock_find_files(file, afile)) + monkeypatch.setattr(Dataset, "find_files", mock_find_files(file1, file2)) datasets = list(dataset.from_files()) @@ -1563,10 +1563,10 @@ def test_set_version(): file_v1.facets["version"] = "v1" file_v2 = esmvalcore.local.LocalFile("/path/to/v2/tas.nc") file_v2.facets["version"] = "v2" - afile = esmvalcore.local.LocalFile("/path/to/v3/areacella.nc") - afile.facets["version"] = "v3" + areacella_file = esmvalcore.local.LocalFile("/path/to/v3/areacella.nc") + areacella_file.facets["version"] = "v3" dataset.files = [file_v2, file_v1] - dataset.supplementaries[0].files = [afile] + dataset.supplementaries[0].files = [areacella_file] dataset.set_version() assert dataset.facets["version"] == ["v1", "v2"] assert dataset.supplementaries[0].facets["version"] == "v3"