From 88d718dfc7eb55233d90533e51d74e52e215da94 Mon Sep 17 00:00:00 2001
From: Petr Cagas
Date: Thu, 30 May 2024 14:49:11 +0200
Subject: [PATCH 1/4] Updating the examples_test so it can be run from any
 directory. Test execution now uses an absolute path and the test results are
 also placed in the pytest temp directory to avoid artefact generation.

---
 test/examples_test.py | 110 +++++++++++++++++++++++++++++++-----------
 1 file changed, 81 insertions(+), 29 deletions(-)

diff --git a/test/examples_test.py b/test/examples_test.py
index 5d74ec16..1586b17b 100644
--- a/test/examples_test.py
+++ b/test/examples_test.py
@@ -1,61 +1,113 @@
 """Test whether the examples are still working."""

+import os
 import importlib
 import runpy

 import pytest

-
 @pytest.mark.examples
 class TestExamples:
-    def test_basic_ex01(self):
-        runpy.run_path("../examples/basic/ex01_train_network.py")
+    dir_path = os.path.dirname(__file__)
+
+    def test_basic_ex01(self, tmp_path):
+        os.chdir(tmp_path / "..")
+        runpy.run_path(
+            self.dir_path
+            + "/../examples/basic/ex01_train_network.py"
+        )

-    def test_basic_ex02(self):
-        runpy.run_path("../examples/basic/ex02_test_network.py")
+    def test_basic_ex02(self, tmp_path):
+        os.chdir(tmp_path / "..")
+        runpy.run_path(
+            self.dir_path
+            + "/../examples/basic/ex02_test_network.py"
+        )

-    def test_basic_ex03(self):
-        runpy.run_path("../examples/basic/ex03_preprocess_data.py")
+    def test_basic_ex03(self, tmp_path):
+        os.chdir(tmp_path / "..")
+        runpy.run_path(
+            self.dir_path
+            + "/../examples/basic/ex03_preprocess_data.py"
+        )

-    def test_basic_ex04(self):
-        runpy.run_path("../examples/basic/ex04_hyperparameter_optimization.py")
+    def test_basic_ex04(self, tmp_path):
+        os.chdir(tmp_path / "..")
+        runpy.run_path(
+            self.dir_path
+            + "/../examples/basic/ex04_hyperparameter_optimization.py"
+        )

-    def test_basic_ex05(self):
-        runpy.run_path("../examples/basic/ex05_run_predictions.py")
+    def test_basic_ex05(self, tmp_path):
+        os.chdir(tmp_path / "..")
+        runpy.run_path(
+            self.dir_path
+            + "/../examples/basic/ex05_run_predictions.py"
+        )

-    def test_basic_ex06(self):
-        runpy.run_path("../examples/basic/ex06_ase_calculator.py")
+    def test_basic_ex06(self, tmp_path):
+        os.chdir(tmp_path / "..")
+        runpy.run_path(
+            self.dir_path
+            + "/../examples/basic/ex06_ase_calculator.py"
+        )

-    def test_advanced_ex01(self):
-        runpy.run_path("../examples/advanced/ex01_checkpoint_training.py")
+    def test_advanced_ex01(self, tmp_path):
+        os.chdir(tmp_path / "..")
+        runpy.run_path(
+            self.dir_path
+            + "/../examples/advanced/ex01_checkpoint_training.py"
+        )

-    def test_advanced_ex02(self):
-        runpy.run_path("../examples/advanced/ex02_shuffle_data.py")
+    def test_advanced_ex02(self, tmp_path):
+        os.chdir(tmp_path / "..")
+        runpy.run_path(
+            self.dir_path
+            + "/../examples/advanced/ex02_shuffle_data.py"
+        )

-    def test_advanced_ex03(self):
-        runpy.run_path("../examples/advanced/ex03_tensor_board.py")
+    def test_advanced_ex03(self, tmp_path):
+        os.chdir(tmp_path / "..")
+        runpy.run_path(
+            self.dir_path
+            + "/../examples/advanced/ex03_tensor_board.py"
+        )

-    def test_advanced_ex04(self):
-        runpy.run_path("../examples/advanced/ex04_acsd.py")
+    def test_advanced_ex04(self, tmp_path):
+        os.chdir(tmp_path / "..")
+        runpy.run_path(
+            self.dir_path
+            + "/../examples/advanced/ex04_acsd.py"
+        )

-    def test_advanced_ex05(self):
+    def test_advanced_ex05(self, tmp_path):
+        os.chdir(tmp_path / "..")
         runpy.run_path(
-            "../examples/advanced/ex05_checkpoint_hyperparameter_optimization.py"
+            self.dir_path
+            + "/../examples/advanced/ex05_checkpoint_hyperparameter_optimization.py"
         )

-    def test_advanced_ex06(self):
+    def test_advanced_ex06(self, tmp_path):
+        os.chdir(tmp_path / "..")
         runpy.run_path(
-            "../examples/advanced/ex06_distributed_hyperparameter_optimization.py"
+            self.dir_path
+            + "/../examples/advanced/ex06_distributed_hyperparameter_optimization.py"
         )

     @pytest.mark.skipif(
         importlib.util.find_spec("oapackage") is None,
         reason="No OAT found on this machine, skipping this " "test.",
     )
-    def test_advanced_ex07(self):
+    def test_advanced_ex07(self, tmp_path):
+        os.chdir(tmp_path / "..")
         runpy.run_path(
-            "../examples/advanced/ex07_advanced_hyperparameter_optimization.py"
+            self.dir_path
+            + "/../examples/advanced/ex07_advanced_hyperparameter_optimization.py"
         )

-    def test_advanced_ex08(self):
-        runpy.run_path("../examples/advanced/ex08_visualize_observables.py")
+    def test_advanced_ex08(self, tmp_path):
+        os.chdir(tmp_path / "..")
+        runpy.run_path(
+            self.dir_path
+            + "/../examples/advanced/ex08_visualize_observables.py"
+        )

From b4947ba360baf79a43845781e9718e9328205b93 Mon Sep 17 00:00:00 2001
From: Petr Cagas
Date: Fri, 31 May 2024 15:06:05 +0200
Subject: [PATCH 2/4] Removed the dependency of examples on example 01. If the
 Be_model.zip is not found, it is loaded from the test-data repository. In
 addition, a `data_path` variable was added to the `datahandling` submodule
 which points directly to the `Be2` subdirectory.

---
 examples/advanced/ex01_checkpoint_training.py | 12 +++---
 examples/advanced/ex02_shuffle_data.py        |  8 ++--
 examples/advanced/ex03_tensor_board.py        |  7 +--
 examples/advanced/ex04_acsd.py                |  6 +--
 ..._checkpoint_hyperparameter_optimization.py | 10 ++---
 ...distributed_hyperparameter_optimization.py |  8 ++--
 ...07_advanced_hyperparameter_optimization.py |  6 +--
 .../advanced/ex08_visualize_observables.py    | 13 +++---
 examples/basic/ex01_train_network.py          |  7 +--
 examples/basic/ex02_test_network.py           | 11 ++---
 examples/basic/ex03_preprocess_data.py        |  6 +--
 .../basic/ex04_hyperparameter_optimization.py |  6 +--
 examples/basic/ex05_run_predictions.py        | 16 +++----
 examples/basic/ex06_ase_calculator.py         | 18 ++++----
 mala/datahandling/data_repo.py                |  2 +
 test/examples_test.py                         | 43 ++++++++++++-------
 16 files changed, 85 insertions(+), 94 deletions(-)

diff --git a/examples/advanced/ex01_checkpoint_training.py b/examples/advanced/ex01_checkpoint_training.py
index 341ff5c6..01bb9b48 100644
--- a/examples/advanced/ex01_checkpoint_training.py
+++ b/examples/advanced/ex01_checkpoint_training.py
@@ -3,18 +3,16 @@
 import mala
 from mala import printout
-from mala.datahandling.data_repo import data_repo_path
-
-data_path = os.path.join(data_repo_path, "Be2")
+from mala.datahandling.data_repo import data_path

 """
-Shows how a training run can be paused and 
+Shows how a training run can be paused and
 resumed. Delete the ex07.zip file prior to execution to see the effect of
 checkpointing.
-Afterwards, execute this script twice to see how MALA progresses from a 
+Afterwards, execute this script twice to see how MALA progresses from a
 checkpoint. As the number of total epochs cannot be divided by the number
-of epochs after which a checkpoint is created without residual, this will 
-lead to MALA performing the missing epochs again. 
+of epochs after which a checkpoint is created without residual, this will
+lead to MALA performing the missing epochs again.
""" diff --git a/examples/advanced/ex02_shuffle_data.py b/examples/advanced/ex02_shuffle_data.py index 467da792..db75d515 100644 --- a/examples/advanced/ex02_shuffle_data.py +++ b/examples/advanced/ex02_shuffle_data.py @@ -2,14 +2,12 @@ import mala -from mala.datahandling.data_repo import data_repo_path - -data_path = os.path.join(data_repo_path, "Be2") +from mala.datahandling.data_repo import data_path """ Shows how data can be shuffled amongst multiple -snapshots, which is very useful in the lazy loading case, where this cannot be -easily done in memory. +snapshots, which is very useful in the lazy loading case, where this cannot be +easily done in memory. """ diff --git a/examples/advanced/ex03_tensor_board.py b/examples/advanced/ex03_tensor_board.py index 00728a56..b1523949 100644 --- a/examples/advanced/ex03_tensor_board.py +++ b/examples/advanced/ex03_tensor_board.py @@ -3,13 +3,10 @@ import mala from mala import printout -from mala.datahandling.data_repo import data_repo_path - -data_path = os.path.join(data_repo_path, "Be2") - +from mala.datahandling.data_repo import data_path """ -Shows how a NN training by MALA can be visualized using +Shows how a NN training by MALA can be visualized using tensorboard. The training is a basic MALA network training. """ diff --git a/examples/advanced/ex04_acsd.py b/examples/advanced/ex04_acsd.py index 5390ae21..53b4b82b 100644 --- a/examples/advanced/ex04_acsd.py +++ b/examples/advanced/ex04_acsd.py @@ -1,13 +1,11 @@ import os import mala -from mala.datahandling.data_repo import data_repo_path - -data_path = os.path.join(data_repo_path, "Be2") +from mala.datahandling.data_repo import data_path """ Shows how MALA can be used to optimize descriptor -parameters based on the ACSD analysis (see hyperparameter paper in the +parameters based on the ACSD analysis (see hyperparameter paper in the documentation for mathematical details). """ diff --git a/examples/advanced/ex05_checkpoint_hyperparameter_optimization.py b/examples/advanced/ex05_checkpoint_hyperparameter_optimization.py index c7f741d7..cef7c8f4 100644 --- a/examples/advanced/ex05_checkpoint_hyperparameter_optimization.py +++ b/examples/advanced/ex05_checkpoint_hyperparameter_optimization.py @@ -2,16 +2,14 @@ import mala -from mala.datahandling.data_repo import data_repo_path - -data_path = os.path.join(data_repo_path, "Be2") +from mala.datahandling.data_repo import data_path """ -Shows how a hyperparameter optimization run can +Shows how a hyperparameter optimization run can be paused and resumed. Delete all ex04_*.pkl and ex04_*.pth prior to execution. -Afterwards, execute this script twice to see how MALA progresses from a +Afterwards, execute this script twice to see how MALA progresses from a checkpoint. As the number of trials cannot be divided by the number -of epochs after which a checkpoint is created without residual, this will +of epochs after which a checkpoint is created without residual, this will lead to MALA performing the missing trials again. 
""" diff --git a/examples/advanced/ex06_distributed_hyperparameter_optimization.py b/examples/advanced/ex06_distributed_hyperparameter_optimization.py index 2a67acb3..b34f9bb8 100644 --- a/examples/advanced/ex06_distributed_hyperparameter_optimization.py +++ b/examples/advanced/ex06_distributed_hyperparameter_optimization.py @@ -2,14 +2,12 @@ import mala -from mala.datahandling.data_repo import data_repo_path - -data_path = os.path.join(data_repo_path, "Be2") +from mala.datahandling.data_repo import data_path """ -ex09_distributed_hyperopt.py: Shows how a hyperparameter +ex09_distributed_hyperopt.py: Shows how a hyperparameter optimization can be sped up using a RDB storage. Ideally this should be done -using a database server system, such as PostgreSQL or MySQL. +using a database server system, such as PostgreSQL or MySQL. For this easy example, sqlite will be used. It is highly advisory not to to use this for actual, at-scale calculations! diff --git a/examples/advanced/ex07_advanced_hyperparameter_optimization.py b/examples/advanced/ex07_advanced_hyperparameter_optimization.py index 629d4796..8165ef01 100644 --- a/examples/advanced/ex07_advanced_hyperparameter_optimization.py +++ b/examples/advanced/ex07_advanced_hyperparameter_optimization.py @@ -3,12 +3,10 @@ import mala from mala import printout -from mala.datahandling.data_repo import data_repo_path - -data_path = os.path.join(data_repo_path, "Be2") +from mala.datahandling.data_repo import data_path """ -Shows how recent developments in hyperparameter optimization techniques can be +Shows how recent developments in hyperparameter optimization techniques can be used (OAT / training-free NAS). REQUIRES OAPACKAGE. diff --git a/examples/advanced/ex08_visualize_observables.py b/examples/advanced/ex08_visualize_observables.py index 3b8bbed3..be344b87 100644 --- a/examples/advanced/ex08_visualize_observables.py +++ b/examples/advanced/ex08_visualize_observables.py @@ -2,18 +2,15 @@ import mala -from mala.datahandling.data_repo import data_repo_path +from mala.datahandling.data_repo import data_path -atoms_path = os.path.join( - os.path.join(data_repo_path, "Be2"), "Be_snapshot1.out" -) -ldos_path = os.path.join( - os.path.join(data_repo_path, "Be2"), "Be_snapshot1.out.npy" -) """ -Shows how MALA can be used to visualize observables of interest. +Shows how MALA can be used to visualize observables of interest. """ +atoms_path = os.path.join(data_path, "Be_snapshot1.out") +ldos_path = os.path.join(data_path, "Be_snapshot1.out.npy") + #################### # 1. READ ELECTRONIC STRUCTURE DATA # This data may be read as part of an ML-DFT model inference. diff --git a/examples/basic/ex01_train_network.py b/examples/basic/ex01_train_network.py index a5d14d89..95eb2d51 100644 --- a/examples/basic/ex01_train_network.py +++ b/examples/basic/ex01_train_network.py @@ -2,9 +2,7 @@ import mala -from mala.datahandling.data_repo import data_repo_path - -data_path = os.path.join(data_repo_path, "Be2") +from mala.datahandling.data_repo import data_path """ This example shows how a neural network can be trained on material @@ -12,7 +10,6 @@ from *.npy files. """ - #################### # 1. 
PARAMETERS # The first step of each MALA workflow is to define a parameters object and @@ -93,5 +90,5 @@ test_trainer.train_network() additional_calculation_data = os.path.join(data_path, "Be_snapshot0.out") test_trainer.save_run( - "be_model", additional_calculation_data=additional_calculation_data + "Be_model", additional_calculation_data=additional_calculation_data ) diff --git a/examples/basic/ex02_test_network.py b/examples/basic/ex02_test_network.py index 6ef81f88..2e4b8953 100644 --- a/examples/basic/ex02_test_network.py +++ b/examples/basic/ex02_test_network.py @@ -3,17 +3,16 @@ import mala from mala import printout -from mala.datahandling.data_repo import data_repo_path - -data_path = os.path.join(data_repo_path, "Be2") +from mala.datahandling.data_repo import data_path """ This example shows how a trained network can be tested with additional test snapshots. Either execute ex01 before executing this one or download the appropriate model from the provided test data repo. """ -assert os.path.exists("be_model.zip"), "Be model missing, run ex01 first." +model_name = "Be_model" +model_path = "./" if os.path.exists("Be_model.zip") else data_path #################### # 1. LOADING A NETWORK @@ -27,7 +26,9 @@ # (output_format="list") or as an averaged value (output_format="mae") #################### -parameters, network, data_handler, tester = mala.Tester.load_run("be_model") +parameters, network, data_handler, tester = mala.Tester.load_run( + run_name=model_name, path=model_path +) tester.observables_to_test = ["band_energy", "number_of_electrons"] tester.output_format = "list" parameters.data.use_lazy_loading = True diff --git a/examples/basic/ex03_preprocess_data.py b/examples/basic/ex03_preprocess_data.py index 72ec9490..b0a10488 100644 --- a/examples/basic/ex03_preprocess_data.py +++ b/examples/basic/ex03_preprocess_data.py @@ -2,13 +2,11 @@ import mala -from mala.datahandling.data_repo import data_repo_path - -data_path = os.path.join(data_repo_path, "Be2") +from mala.datahandling.data_repo import data_path """ Shows how this framework can be used to preprocess -data. Preprocessing here means converting raw DFT calculation output into +data. Preprocessing here means converting raw DFT calculation output into numpy arrays of the correct size. For the input data, this means descriptor calculation. diff --git a/examples/basic/ex04_hyperparameter_optimization.py b/examples/basic/ex04_hyperparameter_optimization.py index 77985f03..4c68179c 100644 --- a/examples/basic/ex04_hyperparameter_optimization.py +++ b/examples/basic/ex04_hyperparameter_optimization.py @@ -2,14 +2,12 @@ import mala -from mala.datahandling.data_repo import data_repo_path - -data_path = os.path.join(data_repo_path, "Be2") +from mala.datahandling.data_repo import data_path """ Shows how a hyperparameter optimization can be done using this framework. There are multiple hyperparameter optimizers available in this framework. This example -focusses on the most universal one - optuna. +focusses on the most universal one - optuna. """ diff --git a/examples/basic/ex05_run_predictions.py b/examples/basic/ex05_run_predictions.py index 4e0d72e3..05deb857 100644 --- a/examples/basic/ex05_run_predictions.py +++ b/examples/basic/ex05_run_predictions.py @@ -4,19 +4,19 @@ import mala from mala import printout -from mala.datahandling.data_repo import data_repo_path - -data_path = os.path.join(data_repo_path, "Be2") - -assert os.path.exists("be_model.zip"), "Be model missing, run ex01 first." 
+from mala.datahandling.data_repo import data_path

 """
-Show how a prediction can be made using MALA, based on only a
-trained network and atomic configurations.
+Show how a prediction can be made using MALA, based on only a trained network and atomic
+configurations. Either execute ex01 before executing this one or download the
+appropriate model from the provided test data repo.

 REQUIRES LAMMPS (and potentiall the total energy module).
 """
+model_name = "Be_model"
+model_path = "./" if os.path.exists("Be_model.zip") else data_path
+

 ####################
 # 1. LOADING A NETWORK
@@ -24,7 +24,7 @@
 # Tester class interface. Afterwards, set the necessary parameters.
 ####################
 parameters, network, data_handler, predictor = mala.Predictor.load_run(
-    "be_model"
+    run_name=model_name, path=model_path
 )
diff --git a/examples/basic/ex06_ase_calculator.py b/examples/basic/ex06_ase_calculator.py
index f4ab2d33..7ba0eee1 100644
--- a/examples/basic/ex06_ase_calculator.py
+++ b/examples/basic/ex06_ase_calculator.py
@@ -1,21 +1,21 @@
 import os

-import mala
 from ase.io import read
+import mala

-from mala.datahandling.data_repo import data_repo_path
-
-data_path = os.path.join(data_repo_path, "Be2")
-
-assert os.path.exists("be_model.zip"), "Be model missing, run ex01 first."
+from mala.datahandling.data_repo import data_path

 """
-Shows how MALA can be used as an ASE calculator.
-Currently, calculation of forces is not supported.
+Shows how MALA can be used as an ASE calculator.
+Currently, calculation of forces is not supported. Either execute ex01 before executing
+this one or download the appropriate model from the provided test data repo.

 REQUIRES LAMMPS AND QUANTUM ESPRESSO (TOTAL ENERGY MODULE).
 """
+model_name = "Be_model"
+model_path = "./" if os.path.exists("Be_model.zip") else data_path
+

 ####################
 # 1. LOADING A NETWORK
@@ -23,7 +23,7 @@
 # Further make sure to set the path to the pseudopotential used during
 # data generation-
 ####################
-calculator = mala.MALA.load_model("be_model")
+calculator = mala.MALA.load_model(run_name=model_name, path=model_path)
 calculator.mala_parameters.targets.pseudopotential_path = data_path
diff --git a/mala/datahandling/data_repo.py b/mala/datahandling/data_repo.py
index 178872b6..203885c1 100644
--- a/mala/datahandling/data_repo.py
+++ b/mala/datahandling/data_repo.py
@@ -14,9 +14,11 @@
 name = "MALA_DATA_REPO"
 if name in os.environ:
     data_repo_path = os.environ[name]
+    data_path = os.path.join(data_repo_path, "Be2")
 else:
     parallel_warn(
         f"Environment variable {name} not set. You won't be able "
         "to run all examples and tests."
     )
     data_repo_path = None
+    data_path = None
diff --git a/test/examples_test.py b/test/examples_test.py
index 1586b17b..b5aa9143 100644
--- a/test/examples_test.py
+++ b/test/examples_test.py
@@ -1,7 +1,7 @@
 """Test whether the examples are still working."""

-import os
 import importlib
+import os
 import runpy

 import pytest
@@ -11,84 +11,95 @@ class TestExamples:
     dir_path = os.path.dirname(__file__)

     def test_basic_ex01(self, tmp_path):
-        os.chdir(tmp_path / "..")
+        os.chdir(tmp_path)
         runpy.run_path(
             self.dir_path
             + "/../examples/basic/ex01_train_network.py"
         )

+    @pytest.mark.order(after="test_basic_ex01")
     def test_basic_ex02(self, tmp_path):
-        os.chdir(tmp_path / "..")
+        os.chdir(tmp_path)
         runpy.run_path(
             self.dir_path
             + "/../examples/basic/ex02_test_network.py"
         )

+    @pytest.mark.order(after="test_basic_ex01")
     def test_basic_ex03(self, tmp_path):
-        os.chdir(tmp_path / "..")
+        os.chdir(tmp_path)
         runpy.run_path(
             self.dir_path
             + "/../examples/basic/ex03_preprocess_data.py"
         )

+    @pytest.mark.order(after="test_basic_ex01")
     def test_basic_ex04(self, tmp_path):
-        os.chdir(tmp_path / "..")
+        os.chdir(tmp_path)
         runpy.run_path(
             self.dir_path
             + "/../examples/basic/ex04_hyperparameter_optimization.py"
         )

+    @pytest.mark.order(after="test_basic_ex01")
     def test_basic_ex05(self, tmp_path):
-        os.chdir(tmp_path / "..")
+        os.chdir(tmp_path)
         runpy.run_path(
             self.dir_path
             + "/../examples/basic/ex05_run_predictions.py"
         )

+    @pytest.mark.order(after="test_basic_ex01")
     def test_basic_ex06(self, tmp_path):
-        os.chdir(tmp_path / "..")
+        os.chdir(tmp_path)
         runpy.run_path(
             self.dir_path
             + "/../examples/basic/ex06_ase_calculator.py"
         )

+    @pytest.mark.order(after="test_basic_ex01")
     def test_advanced_ex01(self, tmp_path):
-        os.chdir(tmp_path / "..")
+        os.chdir(tmp_path)
         runpy.run_path(
             self.dir_path
             + "/../examples/advanced/ex01_checkpoint_training.py"
         )

+    @pytest.mark.order(after="test_basic_ex01")
     def test_advanced_ex02(self, tmp_path):
-        os.chdir(tmp_path / "..")
+        os.chdir(tmp_path)
         runpy.run_path(
             self.dir_path
             + "/../examples/advanced/ex02_shuffle_data.py"
         )

+    @pytest.mark.order(after="test_basic_ex01")
     def test_advanced_ex03(self, tmp_path):
-        os.chdir(tmp_path / "..")
+        os.chdir(tmp_path)
         runpy.run_path(
             self.dir_path
             + "/../examples/advanced/ex03_tensor_board.py"
         )

+    @pytest.mark.order(after="test_basic_ex01")
     def test_advanced_ex04(self, tmp_path):
-        os.chdir(tmp_path / "..")
+        os.chdir(tmp_path)
         runpy.run_path(
             self.dir_path
             + "/../examples/advanced/ex04_acsd.py"
         )

+    @pytest.mark.order(after="test_basic_ex01")
     def test_advanced_ex05(self, tmp_path):
-        os.chdir(tmp_path / "..")
+        os.chdir(tmp_path)
         runpy.run_path(
             self.dir_path
             + "/../examples/advanced/ex05_checkpoint_hyperparameter_optimization.py"
         )

+    @pytest.mark.order(after="test_basic_ex01")
     def test_advanced_ex06(self, tmp_path):
-        os.chdir(tmp_path / "..")
+        os.chdir(tmp_path)
         runpy.run_path(
             self.dir_path
             + "/../examples/advanced/ex06_distributed_hyperparameter_optimization.py"
         )
@@ -98,15 +109,17 @@ def test_advanced_ex06(self, tmp_path):
         importlib.util.find_spec("oapackage") is None,
         reason="No OAT found on this machine, skipping this " "test.",
     )
+    @pytest.mark.order(after="test_basic_ex01")
     def test_advanced_ex07(self, tmp_path):
-        os.chdir(tmp_path / "..")
+        os.chdir(tmp_path)
         runpy.run_path(
             self.dir_path
             + "/../examples/advanced/ex07_advanced_hyperparameter_optimization.py"
         )

+    @pytest.mark.order(after="test_basic_ex01")
     def test_advanced_ex08(self, tmp_path):
-        os.chdir(tmp_path / "..")
+        os.chdir(tmp_path)
         runpy.run_path(
             self.dir_path
             + "/../examples/advanced/ex08_visualize_observables.py"

From a8a2a0ade003552fdb8fb301dc59c92747a71f80 Mon Sep 17 00:00:00 2001
From: Petr Cagas
Date: Fri, 7 Jun 2024 10:46:09 +0200
Subject: [PATCH 3/4] Updating the RODARE path

---
 .github/workflows/cpu-tests.yml | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/.github/workflows/cpu-tests.yml b/.github/workflows/cpu-tests.yml
index b18305a2..06336340 100644
--- a/.github/workflows/cpu-tests.yml
+++ b/.github/workflows/cpu-tests.yml
@@ -1,6 +1,7 @@
 name: CPU tests

 on:
+  workflow_dispatch:
   pull_request:
     # Trigger on pull requests to master or develop that are
     # marked as "ready for review" (non-draft PRs)
@@ -174,16 +175,16 @@ jobs:
       shell: 'bash -c "docker exec -i mala-cpu bash < {0}"'
       run: |
         # Download test data repository from RODARE. If the version changes
-        # this URL has to be adapted (the number after /record/ and the 
+        # this URL has to be adapted (the number after /record/ and the
         # version have to be incremented)
-        wget "https://rodare.hzdr.de/record/2999/files/mala-project/test-data-1.8.0.zip"
-
+        wget "https://rodare.hzdr.de/record/3004/files/mala-project/test-data-1.8.1.zip"
+
         # Once downloaded, we have to unzip the file. The name of the root
         # folder in the zip file has to be updated for data repository
-        # updates as well - the string at the end is the hash of the data 
-        # repository commit.
-        unzip -q test-data-1.8.0.zip
-        mv mala-project-test-data-d5694c7 mala_data
+        # updates as well - the string at the end is the hash of the data
+        # repository commit.
+        unzip -q test-data-1.8.1.zip
+        mv mala-project-test-data-741eda6 mala_data

     - name: Test mala
       shell: 'bash -c "docker exec -i mala-cpu bash < {0}"'

From 67e7c3f42b97f6e92ed0ce5aab6335a5f38925e3 Mon Sep 17 00:00:00 2001
From: Petr Cagas
Date: Fri, 7 Jun 2024 13:53:27 +0200
Subject: [PATCH 4/4] Updating the path to test-data in the test suite and
 redirecting workflow_test to Be_model

---
 test/all_lazy_loading_test.py    | 4 +---
 test/basic_gpu_test.py           | 8 +++-----
 test/checkpoint_hyperopt_test.py | 4 +---
 test/checkpoint_training_test.py | 3 +--
 test/complete_interfaces_test.py | 4 +---
 test/descriptor_test.py          | 4 +---
 test/hyperopt_test.py            | 4 +---
 test/inference_test.py           | 8 +++-----
 test/integration_test.py         | 3 +--
 test/parallel_run_test.py        | 4 +---
 test/scaling_test.py             | 4 +---
 test/shuffling_test.py           | 4 +---
 test/tensor_memory_test.py       | 4 +---
 test/workflow_test.py            | 9 ++++-----
 14 files changed, 21 insertions(+), 46 deletions(-)

diff --git a/test/all_lazy_loading_test.py b/test/all_lazy_loading_test.py
index f5cc7400..065cbb86 100644
--- a/test/all_lazy_loading_test.py
+++ b/test/all_lazy_loading_test.py
@@ -7,9 +7,7 @@
 import torch
 import pytest

-from mala.datahandling.data_repo import data_repo_path
-
-data_path = os.path.join(data_repo_path, "Be2")
+from mala.datahandling.data_repo import data_path

 # This test compares the data scaling using the regular scaling procedure and
 # the lazy-loading one (incremental fitting).
diff --git a/test/basic_gpu_test.py b/test/basic_gpu_test.py
index 943862b3..dcd588ad 100644
--- a/test/basic_gpu_test.py
+++ b/test/basic_gpu_test.py
@@ -6,9 +6,9 @@
 which MALA relies on). Two things are tested:

 1. Whether or not your system has GPU support.
-2. Whether or not the GPU does what it is supposed to. For this, 
+2. Whether or not the GPU does what it is supposed to. For this,
 a training is performed. It is measured whether or not the utilization
-of the GPU results in a speed up. 
+of the GPU results in a speed up.
""" import os import time @@ -19,9 +19,7 @@ import pytest import torch -from mala.datahandling.data_repo import data_repo_path - -data_path = os.path.join(data_repo_path, "Be2") +from mala.datahandling.data_repo import data_path test_checkpoint_name = "test" diff --git a/test/checkpoint_hyperopt_test.py b/test/checkpoint_hyperopt_test.py index f3435e7a..28889c2d 100644 --- a/test/checkpoint_hyperopt_test.py +++ b/test/checkpoint_hyperopt_test.py @@ -4,9 +4,7 @@ from mala import printout import numpy as np -from mala.datahandling.data_repo import data_repo_path - -data_path = os.path.join(data_repo_path, "Be2") +from mala.datahandling.data_repo import data_path checkpoint_name = "test_ho" diff --git a/test/checkpoint_training_test.py b/test/checkpoint_training_test.py index bf7f6209..4c56ed8e 100644 --- a/test/checkpoint_training_test.py +++ b/test/checkpoint_training_test.py @@ -4,9 +4,8 @@ from mala import printout import numpy as np -from mala.datahandling.data_repo import data_repo_path +from mala.datahandling.data_repo import data_path -data_path = os.path.join(data_repo_path, "Be2") test_checkpoint_name = "test" # Define the accuracy used in the tests. diff --git a/test/complete_interfaces_test.py b/test/complete_interfaces_test.py index 127ba8f8..d793da77 100644 --- a/test/complete_interfaces_test.py +++ b/test/complete_interfaces_test.py @@ -8,9 +8,7 @@ import pytest -from mala.datahandling.data_repo import data_repo_path - -data_path = os.path.join(data_repo_path, "Be2") +from mala.datahandling.data_repo import data_path # This test checks whether MALA interfaces to other codes, mainly the ASE diff --git a/test/descriptor_test.py b/test/descriptor_test.py index 4a208f83..74cae40f 100644 --- a/test/descriptor_test.py +++ b/test/descriptor_test.py @@ -6,9 +6,7 @@ import numpy as np import pytest -from mala.datahandling.data_repo import data_repo_path - -data_path = os.path.join(data_repo_path, "Be2") +from mala.datahandling.data_repo import data_path # Accuracy of test. accuracy_descriptors = 5e-8 diff --git a/test/hyperopt_test.py b/test/hyperopt_test.py index b2d93f87..bb003082 100644 --- a/test/hyperopt_test.py +++ b/test/hyperopt_test.py @@ -7,9 +7,7 @@ import mala import numpy as np -from mala.datahandling.data_repo import data_repo_path - -data_path = os.path.join(data_repo_path, "Be2") +from mala.datahandling.data_repo import data_path # Control how much the loss should be better after hyperopt compared to # before. This value is fairly high, but we're training on absolutely diff --git a/test/inference_test.py b/test/inference_test.py index 4e874570..84e0e9cc 100644 --- a/test/inference_test.py +++ b/test/inference_test.py @@ -3,10 +3,8 @@ import numpy as np from mala import Tester, Runner -from mala.datahandling.data_repo import data_repo_path +from mala.datahandling.data_repo import data_path -data_path = os.path.join(data_repo_path, "Be2") -param_path = os.path.join(data_repo_path, "workflow_test/") accuracy_strict = 1e-16 accuracy_coarse = 5e-7 accuracy_very_coarse = 3 @@ -18,7 +16,7 @@ class TestInference: def test_unit_conversion(self): """Test that RAM inexpensive unit conversion works.""" parameters, network, data_handler = Runner.load_run( - "workflow_test", load_runner=False, path=param_path + "Be_model", load_runner=False, path=data_path ) parameters.data.use_lazy_loading = False parameters.running.mini_batch_size = 50 @@ -99,7 +97,7 @@ def test_inference_lazy_loading(self): def __run(use_lazy_loading=False, batchsize=46): # First we load Parameters and network. 
         parameters, network, data_handler, tester = Tester.load_run(
-            "workflow_test", path=param_path
+            "Be_model", path=data_path
         )
         parameters.data.use_lazy_loading = use_lazy_loading
         parameters.running.mini_batch_size = batchsize
diff --git a/test/integration_test.py b/test/integration_test.py
index b27abb87..e4e22ea9 100644
--- a/test/integration_test.py
+++ b/test/integration_test.py
@@ -6,7 +6,7 @@
 import scipy as sp
 import pytest

-from mala.datahandling.data_repo import data_repo_path
+from mala.datahandling.data_repo import data_path

 # In order to test the integration capabilities of MALA we need a
 # QuantumEspresso
@@ -18,7 +18,6 @@
 # Scripts to reproduce the data files used in this test script can be found
 # in the data repo.

-data_path = os.path.join(data_repo_path, "Be2")
 path_to_out = os.path.join(data_path, "Be_snapshot0.out")
 path_to_ldos_npy = os.path.join(data_path, "Be_snapshot0.out.npy")
 path_to_dos_npy = os.path.join(data_path, "Be_snapshot0.dos.npy")
diff --git a/test/parallel_run_test.py b/test/parallel_run_test.py
index 89b0cbad..6ca5c8c8 100644
--- a/test/parallel_run_test.py
+++ b/test/parallel_run_test.py
@@ -6,9 +6,7 @@
 from ase.io import read
 import pytest

-from mala.datahandling.data_repo import data_repo_path
-
-data_path = os.path.join(data_repo_path, "Be2")
+from mala.datahandling.data_repo import data_path

 # Control the various accuracies..
 accuracy_snaps = 1e-4
diff --git a/test/scaling_test.py b/test/scaling_test.py
index d4364843..b7925cd9 100644
--- a/test/scaling_test.py
+++ b/test/scaling_test.py
@@ -4,9 +4,7 @@
 import numpy as np
 import torch

-from mala.datahandling.data_repo import data_repo_path
-
-data_path = os.path.join(data_repo_path, "Be2")
+from mala.datahandling.data_repo import data_path

 # This test checks that all scaling options are working and are not messing
 # up the data.
diff --git a/test/shuffling_test.py b/test/shuffling_test.py
index 202e40c9..e637c7d2 100644
--- a/test/shuffling_test.py
+++ b/test/shuffling_test.py
@@ -3,9 +3,7 @@
 import mala
 import numpy as np

-from mala.datahandling.data_repo import data_repo_path
-
-data_path = os.path.join(data_repo_path, "Be2")
+from mala.datahandling.data_repo import data_path

 # Accuracy for the shuffling test.
 accuracy = np.finfo(float).eps
diff --git a/test/tensor_memory_test.py b/test/tensor_memory_test.py
index 4a70d971..b3cb2567 100644
--- a/test/tensor_memory_test.py
+++ b/test/tensor_memory_test.py
@@ -5,9 +5,7 @@
 from torch.utils.data import TensorDataset
 from torch.utils.data import DataLoader

-from mala.datahandling.data_repo import data_repo_path
-
-data_path = os.path.join(data_repo_path, "Be2")
+from mala.datahandling.data_repo import data_path

 # Define the accuracy used in the tests.
 accuracy = 1e-5
diff --git a/test/workflow_test.py b/test/workflow_test.py
index a652546f..fa7dee01 100644
--- a/test/workflow_test.py
+++ b/test/workflow_test.py
@@ -5,9 +5,8 @@
 import numpy as np
 import pytest

-from mala.datahandling.data_repo import data_repo_path
+from mala.datahandling.data_repo import data_path

-data_path = os.path.join(data_repo_path, "Be2")
 # Control how much the loss should be better after training compared to
 # before. This value is fairly high, but we're training on absolutely
 # minimal amounts of data.
@@ -382,7 +381,7 @@ def test_training_with_postprocessing_data_repo(self):
         """
         # Load parameters, network and data scalers.
         parameters, network, data_handler, tester = mala.Tester.load_run(
-            "workflow_test", path=os.path.join(data_repo_path, "workflow_test")
+            "Be_model", path=data_path
         )

         parameters.targets.target_type = "LDOS"
@@ -431,7 +430,7 @@ def test_predictions(self):
         ####################

         parameters, network, data_handler, tester = mala.Tester.load_run(
-            "workflow_test", path=os.path.join(data_repo_path, "workflow_test")
+            "Be_model", path=data_path
         )
         parameters.targets.target_type = "LDOS"
         parameters.targets.ldos_gridsize = 11
@@ -518,7 +517,7 @@ def test_total_energy_predictions(self):
         ####################

         parameters, network, data_handler, predictor = mala.Predictor.load_run(
-            "workflow_test", path=os.path.join(data_repo_path, "workflow_test")
+            "Be_model", path=data_path
         )
         parameters.targets.target_type = "LDOS"
         parameters.targets.ldos_gridsize = 11