diff --git a/.github/actions/run_tests/entrypoint.sh b/.github/actions/run_tests/entrypoint.sh
index 53692ab10f..7e0a4b72d7 100644
--- a/.github/actions/run_tests/entrypoint.sh
+++ b/.github/actions/run_tests/entrypoint.sh
@@ -8,6 +8,8 @@ WS_PATH=$RUNNER_WORKSPACE/$REPO_NAME
 # set CI jobs directory variable to easily move it
 CI_JOBS_DIR=.github/jobs

+PYTESTS_GROUPS_FILEPATH=.github/parm/pytest_groups.txt
+
 source ${GITHUB_WORKSPACE}/${CI_JOBS_DIR}/bash_functions.sh

 # get branch name for push or pull request events
@@ -30,10 +32,8 @@ if [ $? != 0 ]; then
   ${GITHUB_WORKSPACE}/${CI_JOBS_DIR}/docker_setup.sh
 fi

-#
 # running unit tests (pytests)
-#
-if [ "$INPUT_CATEGORIES" == "pytests" ]; then
+if [[ "$INPUT_CATEGORIES" == pytests* ]]; then
   export METPLUS_ENV_TAG="pytest"
   export METPLUS_IMG_TAG=${branch_name}
   echo METPLUS_ENV_TAG=${METPLUS_ENV_TAG}
@@ -56,14 +56,20 @@ if [ "$INPUT_CATEGORIES" == "pytests" ]; then
     .

   echo Running Pytests
-  command="export METPLUS_PYTEST_HOST=docker; cd internal_tests/pytests; /usr/local/envs/pytest/bin/pytest -vv --cov=../../metplus"
+  command="export METPLUS_PYTEST_HOST=docker; cd internal_tests/pytests;"
+  command+="status=0;"
+  for x in `cat $PYTESTS_GROUPS_FILEPATH`; do
+    marker="${x//_or_/ or }"
+    marker="${marker//not_/not }"
+    command+="/usr/local/envs/pytest/bin/pytest -vv --cov=../../metplus -m \"$marker\""
+    command+=";if [ \$? != 0 ]; then status=1; fi;"
+  done
+  command+="if [ \$status != 0 ]; then echo ERROR: Some pytests failed. Search for FAILED to review; false; fi"
   time_command docker run -v $WS_PATH:$GITHUB_WORKSPACE --workdir $GITHUB_WORKSPACE $RUN_TAG bash -c "$command"
   exit $?
 fi

-#
 # running use case tests
-#
 # split apart use case category and subset list from input
 CATEGORIES=`echo $INPUT_CATEGORIES | awk -F: '{print $1}'`
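For a pytest_groups.txt containing, say, util and plotting_or_long, the loop
above composes a single shell string that is later executed via
``bash -c "$command"`` inside the container. A hedged sketch of the composed
string, wrapped across lines here for readability (the real value is one long
line)::

    export METPLUS_PYTEST_HOST=docker; cd internal_tests/pytests;
    status=0;
    /usr/local/envs/pytest/bin/pytest -vv --cov=../../metplus -m "util";
    if [ $? != 0 ]; then status=1; fi;
    /usr/local/envs/pytest/bin/pytest -vv --cov=../../metplus -m "plotting or long";
    if [ $? != 0 ]; then status=1; fi;
    if [ $status != 0 ]; then echo ERROR: Some pytests failed. Search for FAILED to review; false; fi

The status flag lets every group run to completion while still failing the
job if any single group failed.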
diff --git a/.github/jobs/get_use_cases_to_run.sh b/.github/jobs/get_use_cases_to_run.sh
index 39c250474c..341d1c4801 100755
--- a/.github/jobs/get_use_cases_to_run.sh
+++ b/.github/jobs/get_use_cases_to_run.sh
@@ -1,6 +1,7 @@
 #! /bin/bash
 use_case_groups_filepath=.github/parm/use_case_groups.json
+
 # set matrix to string of an empty array in case no use cases will be run
 matrix="[]"
@@ -31,12 +32,14 @@ fi
 if [ "$run_unit_tests" == "true" ]; then
   echo Adding unit tests to list to run

+  pytests="\"pytests\","
+
   # if matrix is empty, set to an array that only includes pytests
   if [ "$matrix" == "[]" ]; then
-    matrix="[\"pytests\"]"
+    matrix="[${pytests:0: -1}]"
   # otherwise prepend item to list
   else
-    matrix="[\"pytests\", ${matrix:1}"
+    matrix="[${pytests}${matrix:1}"
   fi
 fi
diff --git a/.github/parm/pytest_groups.txt b/.github/parm/pytest_groups.txt
new file mode 100644
index 0000000000..374b99da80
--- /dev/null
+++ b/.github/parm/pytest_groups.txt
@@ -0,0 +1,6 @@
+util
+wrapper
+wrapper_a
+wrapper_b
+wrapper_c
+plotting_or_long
diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml
index 1b47909874..deff878b83 100644
--- a/.github/workflows/testing.yml
+++ b/.github/workflows/testing.yml
@@ -139,24 +139,24 @@ jobs:
       # copy logs with errors to error_logs directory to save as artifact
       - name: Save error logs
         id: save-errors
-        if: ${{ always() && steps.run_tests.conclusion == 'failure' && matrix.categories != 'pytests' }}
+        if: ${{ always() && steps.run_tests.conclusion == 'failure' && !startsWith(matrix.categories,'pytests') }}
         run: .github/jobs/save_error_logs.sh

       # run difference testing
       - name: Run difference tests
         id: run-diff
-        if: ${{ needs.job_control.outputs.run_diff == 'true' && steps.run_tests.conclusion == 'success' && matrix.categories != 'pytests' }}
+        if: ${{ needs.job_control.outputs.run_diff == 'true' && steps.run_tests.conclusion == 'success' && !startsWith(matrix.categories,'pytests') }}
         run: .github/jobs/run_difference_tests.sh ${{ matrix.categories }} ${{ steps.get-artifact-name.outputs.artifact_name }}

       # copy output data to save as artifact
       - name: Save output data
         id: save-output
-        if: ${{ always() && steps.run_tests.conclusion != 'skipped' && matrix.categories != 'pytests' }}
+        if: ${{ always() && steps.run_tests.conclusion != 'skipped' && !startsWith(matrix.categories,'pytests') }}
         run: .github/jobs/copy_output_to_artifact.sh ${{ steps.get-artifact-name.outputs.artifact_name }}

       - name: Upload output data artifact
         uses: actions/upload-artifact@v2
-        if: ${{ always() && steps.run_tests.conclusion != 'skipped' && matrix.categories != 'pytests' }}
+        if: ${{ always() && steps.run_tests.conclusion != 'skipped' && !startsWith(matrix.categories,'pytests') }}
         with:
           name: ${{ steps.get-artifact-name.outputs.artifact_name }}
           path: artifact/${{ steps.get-artifact-name.outputs.artifact_name }}
diff --git a/docs/Contributors_Guide/continuous_integration.rst b/docs/Contributors_Guide/continuous_integration.rst
index 2ad3091b6b..dbb293112e 100644
--- a/docs/Contributors_Guide/continuous_integration.rst
+++ b/docs/Contributors_Guide/continuous_integration.rst
@@ -557,6 +557,38 @@ process can be found in the :ref:`use_case_input_data` section of the
 Add Use Cases chapter of the Contributor's Guide.

+.. _cg-ci-unit-tests:
+
+Unit Tests
+----------
+
+Unit tests are run via pytest.
+Groups of pytests are run in the 'pytests' job.
+The list of groups that will be run in the automated tests is found in
+.github/parm/pytest_groups.txt.
+See :ref:`cg-unit-tests` for more information on pytest groups.
+
+Items in pytest_groups.txt can include::
+
+  * A single group marker name, e.g. wrapper_a
+  * Multiple group marker names separated by _or_, e.g. plotting_or_long
+  * A group marker name to exclude, starting with not_, e.g. not_wrapper
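These entries map directly onto pytest ``-m`` marker expressions: the
entrypoint script shown earlier replaces ``_or_`` with `` or `` and a leading
``not_`` with ``not `` before passing the result to pytest. A minimal sketch
of that substitution (the echo is illustrative only; the real script appends
the pytest call to a command string)::

    for x in `cat .github/parm/pytest_groups.txt`; do
      marker="${x//_or_/ or }"      # plotting_or_long -> "plotting or long"
      marker="${marker//not_/not }" # not_wrapper -> "not wrapper"
      echo pytest -m \"$marker\"
    done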
+
+All pytest groups are currently run in a single GitHub Actions job.
+This was done because the existing automation logic builds a Docker
+environment to run the tests and each testing environment takes a few minutes
+to create (future improvements may speed up execution time by running the
+pytests directly in the GitHub Actions environment instead of Docker).
+Running the pytests in smaller groups serially takes substantially less time
+than calling all of the existing pytests in a single call to pytest,
+so dividing tests into groups is recommended to improve performance.
+Search for the string "deselected in" in the pytests job log
+to see how long each group took to run.
+
+Future enhancements could save and parse this timing information for each
+run and output a summary at the end of the log file, making it easier to see
+which groups should be split up to improve performance.
+
 .. _cg-ci-use-case-tests:

 Use Case Tests
 --------------
diff --git a/docs/Contributors_Guide/testing.rst b/docs/Contributors_Guide/testing.rst
index 3db6f6ff72..93c2f9627f 100644
--- a/docs/Contributors_Guide/testing.rst
+++ b/docs/Contributors_Guide/testing.rst
@@ -4,19 +4,59 @@ Testing
 Test scripts are found in the GitHub repository in the internal_tests
 directory.

+.. _cg-unit-tests:
+
 Unit Tests
 ----------

 Unit tests are run with pytest. They are found in the *pytests* directory.
 Each tool has its own subdirectory containing its test files.

-**run_pytests.sh** is a bash script that can be run to execute all of the
-pytests. A report will be output showing which pytest categories failed.
-When running on a new computer, a
-**minimum_pytest.<host>.sh**
+Unit tests can be run by running the 'pytest' command from the
+internal_tests/pytests directory of the repository.
+The 'pytest' Python package must be available.
+A report will be output showing which pytest categories failed.
+When running on a new computer, a **minimum_pytest.<host>.sh**
 file must be created to be able to run the script. This file contains
 information about the local environment so that the tests can run.

+All unit tests must include one of the custom markers listed in the
+internal_tests/pytests/pytest.ini file. Some examples include:
+
+  * util
+  * wrapper_a
+  * wrapper_b
+  * wrapper_c
+  * wrapper
+  * long
+  * plotting
+
+To apply a marker to a unit test function, add the following on the line before
+the function definition::
+
+    @pytest.mark.<marker>
+
+where <marker> is one of the custom marker strings listed in pytest.ini.
+
+New pytest markers should be added to the pytest.ini file with a brief
+description. If they are not added to the markers list, then a warning will
+be output when running the tests.
+
+There are many unit tests for METplus and false failures can occur if all of
+the tests are run at once.
+To run only tests with a given marker, run::
+
+    pytest -m <marker>
+
+To run all tests that do not have a given marker, run::
+
+    pytest -m "not <marker>"
+
+Multiple marker groups can be run by using the 'or' keyword::
+
+    pytest -m "<marker1> or <marker2>"
+
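To reproduce one of the automated test groups locally, combine markers the
same way the CI entrypoint does. A sketch, assuming the 'pytest' package and
the METplus test prerequisites are installed and the commands are run from
the repository root::

    cd internal_tests/pytests
    pytest -vv -m "plotting or long"   # the plotting_or_long group
    pytest -vv -m "not wrapper"        # everything outside the wrapper group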

 Use Case Tests
 --------------
diff --git a/internal_tests/pytests/plotting/make_plots/test_make_plots_wrapper.py b/internal_tests/pytests/plotting/make_plots/test_make_plots_wrapper.py
index f1a2e54d32..2be97153a6 100644
--- a/internal_tests/pytests/plotting/make_plots/test_make_plots_wrapper.py
+++ b/internal_tests/pytests/plotting/make_plots/test_make_plots_wrapper.py
@@ -1,47 +1,14 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3

-import os
-import datetime
-import sys
-import logging
 import pytest
-import datetime
-import produtil.setup
+import os

 from metplus.wrappers.make_plots_wrapper import MakePlotsWrapper
-from metplus.util import met_util as util
-
-#
-# These are tests (not necessarily unit tests) for the
-# wrapper to make plots, make_plots_wrapper.py
-# NOTE: This test requires pytest, which is NOT part of the standard Python
-# library.
-# These tests require one configuration file in addition to the three
-# required METplus configuration files: test_make_plots.conf. This contains
-# the information necessary for running all the tests. Each test can be
-# customized to replace various settings if needed.
-#
-
-#
-# -----------Mandatory-----------
-# configuration and fixture to support METplus configuration files beyond
-# the metplus_data, metplus_system, and metplus_runtime conf files.
-#
+METPLUS_BASE = os.getcwd().split('/internal_tests')[0]

-# Add a test configuration
-def pytest_addoption(parser):
-    parser.addoption("-c", action="store", help=" -c ")

-# @pytest.fixture
-def cmdopt(request):
-    return request.config.getoption("-c")

-#
-# ------------Pytest fixtures that can be used for all tests ---------------
-#
-#@pytest.fixture
 def make_plots_wrapper(metplus_config):
     """! Returns a default MakePlotsWrapper with /path/to entries in the
     metplus_system.conf and metplus_runtime.conf configuration
@@ -55,35 +22,8 @@ def make_plots_wrapper(metplus_config):
     config = metplus_config(extra_configs)
     return MakePlotsWrapper(config)

-# ------------------TESTS GO BELOW ---------------------------
-#
-
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-# To test numerous files for filesize, use parametrization:
-# @pytest.mark.parametrize(
-#     'key, value', [
-#         ('/usr/local/met-6.1/bin/point_stat', 382180),
-#         ('/usr/local/met-6.1/bin/stat_analysis', 3438944),
-#         ('/usr/local/met-6.1/bin/pb2nc', 3009056)
-#
-#     ]
-# )
-# def test_file_sizes(key, value):
-#     st = stat_analysis_wrapper()
-#     # Retrieve the value of the class attribute that corresponds
-#     # to the key in the parametrization
-#     files_in_dir = []
-#     for dirpath, dirnames, files in os.walk("/usr/local/met-6.1/bin"):
-#         for name in files:
-#             files_in_dir.append(os.path.join(dirpath, name))
-#     if actual_key in files_in_dir:
-#         # The actual_key is one of the files of interest we retrieved from
-#         # the output directory. Verify that it's file size is what we
-#         # expected.
-#         assert actual_key == key
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-METPLUS_BASE = os.getcwd().split('/internal_tests')[0] +@pytest.mark.plotting def test_get_command(metplus_config): # Independently test that the make_plots python # command is being put together correctly with @@ -98,6 +38,8 @@ def test_get_command(metplus_config): test_command = mp.get_command() assert(expected_command == test_command) + +@pytest.mark.plotting def test_create_c_dict(metplus_config): # Independently test that c_dict is being created # and that the wrapper and config reader diff --git a/internal_tests/pytests/plotting/plot_util/test_plot_util.py b/internal_tests/pytests/plotting/plot_util/test_plot_util.py index 386c584bb4..d22ce6b7aa 100644 --- a/internal_tests/pytests/plotting/plot_util/test_plot_util.py +++ b/internal_tests/pytests/plotting/plot_util/test_plot_util.py @@ -1,48 +1,23 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 + +import pytest import os -import datetime import sys -import logging -import pytest import datetime +import logging + import numpy as np import pandas as pd -import produtil.setup -# ------------------TESTS GO BELOW --------------------------- -# - -#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -# To test numerous files for filesize, use parametrization: -# @pytest.mark.parametrize( -# 'key, value', [ -# ('/usr/local/met-6.1/bin/point_stat', 382180), -# ('/usr/local/met-6.1/bin/stat_analysis', 3438944), -# ('/usr/local/met-6.1/bin/pb2nc', 3009056) -# -# ] -# ) -# def test_file_sizes(key, value): -# st = stat_analysis_wrapper() -# # Retrieve the value of the class attribute that corresponds -# # to the key in the parametrization -# files_in_dir = [] -# for dirpath, dirnames, files in os.walk("/usr/local/met-6.1/bin"): -# for name in files: -# files_in_dir.append(os.path.join(dirpath, name)) -# if actual_key in files_in_dir: -# # The actual_key is one of the files of interest we retrieved from -# # the output directory. Verify that it's file size is what we -# # expected. -# assert actual_key == key -#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
METPLUS_BASE = os.getcwd().split('/internal_tests')[0] sys.path.append(METPLUS_BASE+'/ush/plotting_scripts') import plot_util logger = logging.getLogger('~/metplus_pytest_plot_util.log') + +@pytest.mark.plotting def test_get_date_arrays(): # Independently test the creation of # the date arrays, one used for plotting @@ -209,6 +184,8 @@ def test_get_date_arrays(): assert(test_expected_stat_file_dates[l] == expected_expected_stat_file_dates[l]) + +@pytest.mark.plotting def test_format_thresh(): # Independently test the formatting # of thresholds @@ -297,6 +274,8 @@ def test_format_thresh(): assert(test_thresh_symbol == expected_thresh_symbol) assert(test_thresh_letter == expected_thresh_letter) + +@pytest.mark.plotting def test_get_stat_file_base_columns(): # Independently test getting list # of the base MET version .stat file columns @@ -332,6 +311,8 @@ def test_get_stat_file_base_columns(): ) assert(test_stat_file_base_columns == expected_stat_file_base_columns) + +@pytest.mark.plotting def test_get_stat_file_line_type_columns(): # Independently test getting list # of the line type MET version .stat file columns @@ -441,6 +422,8 @@ def test_get_stat_file_line_type_columns(): assert(test_stat_file_line_type_columns == expected_stat_file_line_type_columns) + +@pytest.mark.plotting def get_clevels(): # Independently test creating an array # of levels centered about 0 to plot @@ -453,6 +436,8 @@ def get_clevels(): test_clevels = plot_util.get_clevels(data) assert(test_clevels == expected_clevels) + +@pytest.mark.plotting def test_calculate_average(): # Independently test getting the average # of a data array based on method @@ -558,7 +543,9 @@ def test_calculate_average(): assert(len(test_average_array) == len(expected_average_array)) for l in range(len(test_average_array)): assert(round(test_average_array[l],6) == expected_average_array[l]) - + + +@pytest.mark.long def test_calculate_ci(): pytest.skip("Takes far too long to run") # Independently test getting the @@ -691,6 +678,8 @@ def test_calculate_ci(): stat, average_method, randx) assert(test_intvl == expected_intvl) + +@pytest.mark.plotting def test_get_stat_plot_name(): # Independently test getting the # a more formalized statistic name @@ -730,6 +719,8 @@ def test_get_stat_plot_name(): test_stat_plot_name = plot_util.get_stat_plot_name(logger, stat) assert(test_stat_plot_name == expected_stat_plot_name) + +@pytest.mark.plotting def test_calculate_stat(): # Independently test calculating # statistic values diff --git a/internal_tests/pytests/tcmpr_plotter/test_tcmpr_plotter.py b/internal_tests/pytests/plotting/tcmpr_plotter/test_tcmpr_plotter.py similarity index 99% rename from internal_tests/pytests/tcmpr_plotter/test_tcmpr_plotter.py rename to internal_tests/pytests/plotting/tcmpr_plotter/test_tcmpr_plotter.py index e50c64ce29..519bcbb943 100644 --- a/internal_tests/pytests/tcmpr_plotter/test_tcmpr_plotter.py +++ b/internal_tests/pytests/plotting/tcmpr_plotter/test_tcmpr_plotter.py @@ -1,12 +1,8 @@ #!/usr/bin/env python3 -import os -import sys -import re import pytest -from datetime import datetime -import produtil +import os from metplus.wrappers.tcmpr_plotter_wrapper import TCMPRPlotterWrapper @@ -18,6 +14,7 @@ TIME_FMT = '%Y%m%d%H' RUN_TIME = '20141214' + def set_minimum_config_settings(config): # set config variables to prevent command from running and bypass check # if input files actually exist @@ -37,6 +34,7 @@ def set_minimum_config_settings(config): config.set('config', 'TCMPR_PLOTTER_PLOT_OUTPUT_DIR', 
'{OUTPUT_BASE}/TCMPRPlotter/tcmpr_plots') + @pytest.mark.parametrize( 'config_overrides,expected_loop_args', [ # 0: no loop args @@ -99,6 +97,7 @@ def set_minimum_config_settings(config): 'plot': [('pitem1', 'P Label 1'), ('pitem2', 'P Label 2')]}), ] ) +@pytest.mark.plotting def test_read_loop_info(metplus_config, config_overrides, expected_loop_args): config = metplus_config() @@ -111,6 +110,7 @@ def test_read_loop_info(metplus_config, config_overrides, expected_loop_args): wrapper = TCMPRPlotterWrapper(config) assert wrapper.read_loop_info() == expected_loop_args + @pytest.mark.parametrize( 'config_overrides,expected_strings', [ # 0: no optional arguments @@ -178,7 +178,7 @@ def test_read_loop_info(metplus_config, config_overrides, expected_loop_args): '-dep ditem1 -plot pitem1')]), ] ) - +@pytest.mark.plotting def test_tcmpr_plotter_loop(metplus_config, config_overrides, expected_strings): config = metplus_config() @@ -271,7 +271,7 @@ def test_tcmpr_plotter_loop(metplus_config, config_overrides, ({'TCMPR_PLOTTER_PLOT_TYPES': 'item1'}, '-plot item1'), ] ) - +@pytest.mark.plotting def test_tcmpr_plotter(metplus_config, config_overrides, expected_string): # add a space before value if expected string has a value if expected_string: diff --git a/internal_tests/pytests/produtil/README b/internal_tests/pytests/produtil/README deleted file mode 100644 index 540ba31e6d..0000000000 --- a/internal_tests/pytests/produtil/README +++ /dev/null @@ -1,23 +0,0 @@ -The test_produtil.py provides some simple tests for the produtil module. - -To run the test: - -1) cd to the directory METplus/internal_tests/pytest/produtil - -2) open the test_produtil.py file and replace the '/path/to' with the full path to the directory where your produtil_test.conf file -resides (this will be in METplus/internal_tests/pytest/produtil). - -NOTE: This is necessary, as we are NOT using run_metplus.py to begin the process of reading in the config -file, test_produtil.conf - - -2) Then, from the command line, enter the following command: - - pytest -c ./produtil_test.conf - -There are currently 9 tests and they should pass. - - - - - diff --git a/internal_tests/pytests/produtil/produtil_test.conf b/internal_tests/pytests/produtil/produtil_test.conf deleted file mode 100644 index 8d3b0b84b7..0000000000 --- a/internal_tests/pytests/produtil/produtil_test.conf +++ /dev/null @@ -1,31 +0,0 @@ - -# Test configuration for METplus produtil -[config] -STRING_VALUE = someStringValue!#@$% -INT_VALUE = 2908887 -RAW_VALUE = GRIB_lvl_type = 100 -BOOL_VALUE = True -NEW_LINES = very long line requiring newline character to be tested 12345 - 67890 end of the line. 
-UNASSIGNED_VALUE = -JOB_LIST = -job filter -dump_row {PROJ_DIR}/dump_file.out -job summary by AMAX_WIND -job summary 'ABS(AMAX_WIND-BMAX_WIND)' -out {OUTPUT_BASE}/max_wind_delta.tcst -JOBS = -job summary -by AMODEL,LEAD -column AMSLP -column BMSLP -column 'ABS(AMSLP-BMSLP)' -out {OUTPUT_BASE}/tc_stat_summary.out - -[dir] -# set in the metplus_data.conf to /path/to, override here for testing -PROJ_DIR = /tmp/produtil_testing - -# set in the metplus_system.conf to /path/to, override here for testing, set to -# appropriate version of MET -MET_INSTALL_DIR = /usr/local/met-8.1 -METPLUS_BASE = /usr/local/met-8.1 -OUTPUT_BASE = /tmp/produtil_testing/out -TMP_DIR = /tmp/produtil_testing/tmp - -# Used for testing -DIR_VALUE = /tmp/some_dir -BASE_DIR = /tmp -SPECIFIC_DIR = {BASE_DIR}/specific_place - -[exe] -WGRIB2 = wgrib2 diff --git a/internal_tests/pytests/produtil/test_produtil.py b/internal_tests/pytests/produtil/test_produtil.py deleted file mode 100644 index c5e816e742..0000000000 --- a/internal_tests/pytests/produtil/test_produtil.py +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/env python3 - -import os -import subprocess -import produtil.setup -import sys -import logging -import pytest -from shutil import which - -from metplus.util import met_util as util - -# -# These are tests (not necessarily unit tests) for the -# MET Point-Stat Wrapper, PointStatWrapper.py -# NOTE: This test requires pytest, which is NOT part of the standard Python -# library. -# These tests require one configuration file in addition to the three -# required METplus configuration files: point_stat_test.conf. This contains -# the information necessary for running all the tests. Each test can be -# customized to replace various settings if needed. -# - -# -# -----------Mandatory----------- -# configuration and fixture to support METplus configuration files beyond -# the metplus_data, metplus_system, and metplus_runtime conf files. -# - - -# Add a test configuration -def pytest_addoption(parser): - parser.addoption("-c", action="store", help=" -c ") - - -# @pytest.fixture -def cmdopt(request): - return request.config.getoption("-c") - - -# ------------------------ -def dummy(): - assert(True) - -def get_config_obj(metplus_config): - """! Create the configuration object that is used by all tests""" - file_list = ["/path/to/METplus/internal_tests/pytests/produtil"] - extra_configs = [] - extra_configs.append(os.path.join(os.path.dirname(__file__), 'produtil_test.conf')) - config = metplus_config(extra_configs) - - return config - - -def test_getstr_ok(metplus_config): - """! Test that the expected string is retrieved via produtil's getstr - method - """ - conf_obj = get_config_obj(metplus_config) - str_value = conf_obj.getstr('config', 'STRING_VALUE') - expected_str_value = "someStringValue!#@$%" - assert str_value == expected_str_value - - -def test_getint_ok(metplus_config): - """! Test that the expected int in the produtil_test.conf file has been - retrieved correctly. - """ - conf_obj = get_config_obj(metplus_config) - expected_int_value = int(2908887) - int_value = conf_obj.getint('config', 'INT_VALUE') - assert int_value == expected_int_value - - - -def test_getdir_ok(metplus_config): - """! Test that the directory in the produtil_test.conf file has been - correctly retrieved. - """ - conf_obj = get_config_obj(metplus_config) - expected_dir = "/tmp/some_dir" - dir_retrieved = conf_obj.getdir('DIR_VALUE') - assert dir_retrieved == expected_dir - - -def test_getdir_compound_ok(metplus_config): - """! 
Test that directories created from other directories, ie. - BASE_DIR = /base/dir - SPECIFIC_DIR = {BASE_DIR}/specific/dir - - correctly returns the directory path for SPECIFIC_DIR - """ - expected_specific_dir = "/tmp/specific_place" - conf_obj = get_config_obj(metplus_config) - specific_dir = conf_obj.getdir('SPECIFIC_DIR') - assert specific_dir == expected_specific_dir - - -def test_no_value_as_string(metplus_config): - """! Tests that a key with no value returns an empty string.""" - - conf_obj = get_config_obj(metplus_config) - expected_unassigned = '' - unassigned = conf_obj.getstr('config', 'UNASSIGNED_VALUE') - print("unassigned: ", unassigned) - print("expected: ", expected_unassigned) - assert unassigned == expected_unassigned - - -def test_no_value_as_list(metplus_config): - """! Tests that a key with no list of strings returns an empty list.""" - - conf_obj = get_config_obj(metplus_config) - expected_unassigned = [] - unassigned = util.getlist(conf_obj.getstr('config', 'UNASSIGNED_VALUE')) - assert unassigned == expected_unassigned - - -def test_new_lines_in_conf(metplus_config): - """! Test that any newlines in the configuration file are handled - properly - """ - - conf_obj = get_config_obj(metplus_config) - expected_string = \ - "very long line requiring newline character to be tested 12345\n67890 end of the line." - long_line = conf_obj.getstr('config', 'NEW_LINES') - assert long_line == expected_string - - -def test_get_exe_ok(metplus_config): - """! Test that executables are correctly retrieved.""" - conf_obj = get_config_obj(metplus_config) - expected_exe = which('wgrib2') - executable = conf_obj.getexe('WGRIB2') - assert executable == expected_exe - - -def test_get_bool(metplus_config): - """! Test that boolean values are correctly retrieved.""" - conf_obj = get_config_obj(metplus_config) - bool_val = conf_obj.getbool('config', 'BOOL_VALUE') - assert bool_val is True - diff --git a/internal_tests/pytests/produtil/work_in_progress_test_produtil_regression.py b/internal_tests/pytests/produtil/work_in_progress_test_produtil_regression.py deleted file mode 100644 index 580e376cc7..0000000000 --- a/internal_tests/pytests/produtil/work_in_progress_test_produtil_regression.py +++ /dev/null @@ -1,159 +0,0 @@ -#!/usr/bin/env python3 - -import os -import subprocess -import produtil -import sys -import logging -import pytest -import config_metplus -import config_launcher as launcher -import met_util as util - - -# -# These are tests (not necessarily unit tests) for the -# MET Point-Stat Wrapper, PointStatWrapper.py -# NOTE: This test requires pytest, which is NOT part of the standard Python -# library. -# These tests require one configuration file in addition to the three -# required METplus configuration files: point_stat_test.conf. This contains -# the information necessary for running all the tests. Each test can be -# customized to replace various settings if needed. -# - -# -# -----------Mandatory----------- -# configuration and fixture to support METplus configuration files beyond -# the metplus_data, metplus_system, and metplus_runtime conf files. -# - - -# Add a test configuration -# def pytest_addoption(parser): -# parser.addoption("-c", action="store", help=" -c ") -# -# -# # @pytest.fixture -# def cmdopt(request): -# return request.config.getoption("-c") - - -# ------------------------ - -def dummy(): - assert(True) - - -def get_config_obj(): - """! 
Create the configuration object that is used by all tests""" - file_list = ["METplus/internal_tests/pytests/produtil"] - config_obj = config_metplus.setup(file_list[0]) - - return config_obj - - -def test_getstr_ok(regtest): - """! Test that the expected string is retrieved via produtil's getstr - method - """ - conf_obj = get_config_obj() - str_value = conf_obj.getstr('config', 'STRING_VALUE') - expected_str_value = "someStringValue!#@$%" - # print(str_value, file=regtest) - regtest.write("done") -# -# -# def test_getint_ok(regtest): -# """! Test that the expected int in the produtil_test.conf file has been -# retrieved correctly. -# """ -# conf_obj = get_config_obj() -# expected_int_value = int(2908887) -# int_value = conf_obj.getint('config', 'INT_VALUE') -# # print(int_value, file=regtest) -# regtest.write("done") -# -# -# def test_getraw_ok(regtest): -# """! Test that the raw value in the produtil_test.conf file has been -# retrieved correctly. -# """ -# conf_obj = get_config_obj() -# expected_raw = 'GRIB_lvl_type = 100' -# raw_value = conf_obj.getraw('config', 'RAW_VALUE') -# # print(raw_value, file=regtest) -# # regtest.write("done") -# -# -# def test_getdir_ok(regtest): -# """! Test that the directory in the produtil_test.conf file has been -# correctly retrieved. -# """ -# conf_obj = get_config_obj() -# expected_dir = "/tmp/some_dir" -# dir_retrieved = conf_obj.getdir('DIR_VALUE') -# # print(dir_retrieved, file=regtest) -# -# -# def test_getdir_compound_ok(regtest): -# """! Test that directories created from other directories, ie. -# BASE_DIR = /base/dir -# SPECIFIC_DIR = {BASE_DIR}/specific/dir -# -# correctly returns the directory path for SPECIFIC_DIR -# """ -# expected_specific_dir = "/tmp/specific_place" -# conf_obj = get_config_obj() -# specific_dir = conf_obj.getdir('SPECIFIC_DIR') -# print(specific_dir, file=regtest) -# -# -# def test_no_value_as_string(regtest): -# """! Tests that a key with no value returns an empty string.""" -# -# conf_obj = get_config_obj() -# expected_unassigned = '' -# unassigned = conf_obj.getstr('config', 'UNASSIGNED_VALUE') -# # print(unassigned, file=regtest) -# -# -# def test_no_value_as_list(regtest): -# """! Tests that a key with no list of strings returns an empty list.""" -# -# conf_obj = get_config_obj() -# expected_unassigned = [] -# unassigned = util.getlist(conf_obj.getstr('config', 'UNASSIGNED_VALUE')) -# assert unassigned == expected_unassigned -# # print(unassigned, file=regtest) -# -# -# def test_new_lines_in_conf(regtest): -# """! Test that any newlines in the configuration file are handled -# properly -# """ -# -# conf_obj = get_config_obj() -# expected_string = \ -# "very long line requiring newline character to be tested 12345\n67890 end of the line." -# long_line = conf_obj.getstr('config', 'NEW_LINES') -# assert long_line == expected_string -# # print(long_line, file=regtest) -# -# -# def test_get_exe_ok(regtest): -# """! Test that executables are correctly retrieved.""" -# conf_obj = get_config_obj() -# expected_exe = '/usr/local/bin/wgrib2' -# executable = conf_obj.getexe('WGRIB2') -# assert executable == expected_exe -# # print(executable, file=regtest) -# -# -# def test_get_bool(regtest): -# """! 
Test that boolean values are correctly retrieved.""" -# conf_obj = get_config_obj() -# bool_val = conf_obj.getbool('config', 'BOOL_VALUE') -# assert bool_val is True -# # print(bool_val, file=regtest) -# diff --git a/internal_tests/pytests/pytest.ini b/internal_tests/pytests/pytest.ini new file mode 100644 index 0000000000..8630509ec0 --- /dev/null +++ b/internal_tests/pytests/pytest.ini @@ -0,0 +1,9 @@ +[pytest] +markers = + util: custom marker for testing metplus/util logic + wrapper_a: custom marker for testing metplus/wrapper logic - A group + wrapper_b: custom marker for testing metplus/wrapper logic - B group + wrapper_c: custom marker for testing metplus/wrapper logic - C group + wrapper: custom marker for testing metplus/wrapper logic - all others + long: custom marker for tests that take a long time to run + plotting: custom marker for tests that involve plotting diff --git a/internal_tests/pytests/config/config_1.conf b/internal_tests/pytests/util/config/config_1.conf similarity index 100% rename from internal_tests/pytests/config/config_1.conf rename to internal_tests/pytests/util/config/config_1.conf diff --git a/internal_tests/pytests/config/config_2.conf b/internal_tests/pytests/util/config/config_2.conf similarity index 100% rename from internal_tests/pytests/config/config_2.conf rename to internal_tests/pytests/util/config/config_2.conf diff --git a/internal_tests/pytests/config/config_3.conf b/internal_tests/pytests/util/config/config_3.conf similarity index 100% rename from internal_tests/pytests/config/config_3.conf rename to internal_tests/pytests/util/config/config_3.conf diff --git a/internal_tests/pytests/config/test_config.py b/internal_tests/pytests/util/config/test_config.py similarity index 89% rename from internal_tests/pytests/config/test_config.py rename to internal_tests/pytests/util/config/test_config.py index a18678af32..7c054ab3d8 100644 --- a/internal_tests/pytests/config/test_config.py +++ b/internal_tests/pytests/util/config/test_config.py @@ -1,16 +1,14 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 -import sys import pytest -import datetime + import os from configparser import NoOptionError from shutil import which -import produtil - from metplus.util import met_util as util + @pytest.mark.parametrize( 'input_value, result', [ (3600, 3600), @@ -28,6 +26,7 @@ (None, None), ] ) +@pytest.mark.util def test_getseconds(metplus_config, input_value, result): conf = metplus_config() if input_value is not None: @@ -35,10 +34,11 @@ def test_getseconds(metplus_config, input_value, result): try: seconds = conf.getseconds('config', 'TEST_SECONDS') - assert(seconds == result) + assert seconds == result except NoOptionError: if result is None: - assert(True) + assert True + # value = None -- config variable not set @pytest.mark.parametrize( @@ -55,6 +55,7 @@ def test_getseconds(metplus_config, input_value, result): (None, '1', '1'), ] ) +@pytest.mark.util def test_getstr(metplus_config, input_value, default, result): conf = metplus_config() if input_value is not None: @@ -62,10 +63,11 @@ def test_getstr(metplus_config, input_value, default, result): # catch NoOptionError exception and pass test if default is None try: - assert(result == conf.getstr('config', 'TEST_GETSTR', default)) + assert result == conf.getstr('config', 'TEST_GETSTR', default) except NoOptionError: if default is None: - assert(True) + assert True + # value = None -- config variable not set @pytest.mark.parametrize( @@ -78,6 +80,7 @@ def test_getstr(metplus_config, input_value, default, 
result): ] ) +@pytest.mark.util def test_getdir(metplus_config, input_value, default, result): conf = metplus_config() if input_value is not None: @@ -85,13 +88,14 @@ def test_getdir(metplus_config, input_value, default, result): # catch NoOptionError exception and pass test if default is None try: - assert(result == conf.getdir('TEST_GETSTR', default=default)) + assert result == conf.getdir('TEST_GETSTR', default=default) except NoOptionError: if result is 'NoOptionError': - assert(True) + assert True except ValueError: if result is 'ValueError': - assert(True) + assert True + # value = None -- config variable not set @pytest.mark.parametrize( @@ -104,6 +108,7 @@ def test_getdir(metplus_config, input_value, default, result): ('{valid?fmt=%Y%m%d}_{NOT_REAL_VAR}', None, '{valid?fmt=%Y%m%d}_{NOT_REAL_VAR}'), ] ) +@pytest.mark.util def test_getraw(metplus_config, input_value, default, result): conf = metplus_config() conf.set('config', 'TEST_EXTRA', 'extra') @@ -112,7 +117,7 @@ def test_getraw(metplus_config, input_value, default, result): if input_value is not None: conf.set('config', 'TEST_GETRAW', input_value) - assert(result == conf.getraw('config', 'TEST_GETRAW', default=default)) + assert result == conf.getraw('config', 'TEST_GETRAW', default=default) # value = None -- config variable not set @@ -137,6 +142,7 @@ def test_getraw(metplus_config, input_value, default, result): (None, None, None), ] ) +@pytest.mark.util def test_getbool(metplus_config, input_value, default, result): conf = metplus_config() if input_value is not None: @@ -144,10 +150,11 @@ def test_getbool(metplus_config, input_value, default, result): # catch NoOptionError exception and pass test if default is None try: - assert(result == conf.getbool('config', 'TEST_GETBOOL', default)) + assert result == conf.getbool('config', 'TEST_GETBOOL', default) except NoOptionError: if result is None: - assert(True) + assert True + # value = None -- config variable not set @pytest.mark.parametrize( @@ -158,12 +165,13 @@ def test_getbool(metplus_config, input_value, default, result): ('sh', which('sh')), ] ) +@pytest.mark.util def test_getexe(metplus_config, input_value, result): conf = metplus_config() if input_value is not None: conf.set('config', 'TEST_GETEXE', input_value) - assert(result == conf.getexe('TEST_GETEXE')) + assert result == conf.getexe('TEST_GETEXE') # value = None -- config variable not set @pytest.mark.parametrize( @@ -186,10 +194,11 @@ def test_getfloat(metplus_config, input_value, default, result): conf.set('config', 'TEST_GETFLOAT', input_value) try: - assert(result == conf.getfloat('config', 'TEST_GETFLOAT', default)) + assert result == conf.getfloat('config', 'TEST_GETFLOAT', default) except ValueError: if result is None: - assert(True) + assert True + # value = None -- config variable not set @pytest.mark.parametrize( @@ -209,16 +218,18 @@ def test_getfloat(metplus_config, input_value, default, result): ('', 2.2, util.MISSING_DATA_VALUE), ] ) +@pytest.mark.util def test_getint(metplus_config, input_value, default, result): conf = metplus_config() if input_value is not None: conf.set('config', 'TEST_GETINT', input_value) try: - assert(result == conf.getint('config', 'TEST_GETINT', default)) + assert result == conf.getint('config', 'TEST_GETINT', default) except ValueError: if result is None: - assert(True) + assert True + @pytest.mark.parametrize( 'config_key, expected_result', [ @@ -229,6 +240,7 @@ def test_getint(metplus_config, input_value, default, result): ('VAR_TO_TEST_A', 'A3'), ] ) +@pytest.mark.util 
def test_move_all_to_config_section(metplus_config, config_key, expected_result): config_files = ['config_1.conf', 'config_2.conf', @@ -237,7 +249,8 @@ def test_move_all_to_config_section(metplus_config, config_key, expected_result) test_dir = os.path.dirname(__file__) config_files = [os.path.join(test_dir, item) for item in config_files] config = metplus_config(config_files) - assert(config.getstr('config', config_key) == expected_result) + assert config.getstr('config', config_key) == expected_result + @pytest.mark.parametrize( 'overrides, config_key, expected_result', [ @@ -266,10 +279,12 @@ def test_move_all_to_config_section(metplus_config, config_key, expected_result) 'CMD_LINE_1', '2'), ] ) +@pytest.mark.util def test_move_all_to_config_section_cmd_line(metplus_config, overrides, config_key, expected_result): config = metplus_config(overrides) - assert(config.getstr('config', config_key, '') == expected_result) + assert config.getstr('config', config_key, '') == expected_result + @pytest.mark.parametrize( 'config_name, expected_result', [ @@ -314,6 +329,7 @@ def test_move_all_to_config_section_cmd_line(metplus_config, overrides, ), ] ) +@pytest.mark.util def test_getraw_nested_curly_braces(metplus_config, config_name, expected_result): @@ -323,4 +339,4 @@ def test_getraw_nested_curly_braces(metplus_config, config_files = [os.path.join(test_dir, item) for item in config_files] config = metplus_config(config_files) sec, name = config_name.split('.', 1) - assert(config.getraw(sec, name) == expected_result) + assert config.getraw(sec, name) == expected_result diff --git a/internal_tests/pytests/config_metplus/test_config_metplus.py b/internal_tests/pytests/util/config_metplus/test_config_metplus.py similarity index 95% rename from internal_tests/pytests/config_metplus/test_config_metplus.py rename to internal_tests/pytests/util/config_metplus/test_config_metplus.py index f0e669443b..07a32655f5 100644 --- a/internal_tests/pytests/config_metplus/test_config_metplus.py +++ b/internal_tests/pytests/util/config_metplus/test_config_metplus.py @@ -1,14 +1,18 @@ #!/usr/bin/env python3 import pytest + import pprint import os from datetime import datetime from metplus.util import config_metplus + +@pytest.mark.util def test_get_default_config_list(): test_data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), + os.pardir, os.pardir, os.pardir, 'data', @@ -37,6 +41,7 @@ def test_get_default_config_list(): assert actual_new == expected_new assert actual_both == expected_both + @pytest.mark.parametrize( 'regex,index,id,expected_result', [ # 0: No ID @@ -64,6 +69,7 @@ def test_get_default_config_list(): '2': ['NAME', 'MEMBERS', 'REQUIRED', 'MIN_REQ']}), ] ) +@pytest.mark.util def test_find_indices_in_config_section(metplus_config, regex, index, id, expected_result): config = metplus_config() @@ -83,7 +89,6 @@ def test_find_indices_in_config_section(metplus_config, regex, index, config.set('config', 'TC_PAIRS_CONSENSUS2_REQUIRED', 'True') config.set('config', 'TC_PAIRS_CONSENSUS2_MIN_REQ', '2') - indices = config_metplus.find_indices_in_config_section(regex, config, index_index=index, id_index=id) @@ -94,6 +99,7 @@ def test_find_indices_in_config_section(metplus_config, regex, index, assert indices == expected_result + @pytest.mark.parametrize( 'conf_items, met_tool, expected_result', [ ({'CUSTOM_LOOP_LIST': "one, two, three"}, '', ['one', 'two', 'three']), @@ -110,12 +116,14 @@ def test_find_indices_in_config_section(metplus_config, regex, index, 'POINT2GRID_CUSTOM_LOOP_LIST': "four, 
five",}, 'point2grid', ['four', 'five']), ] ) +@pytest.mark.util def test_get_custom_string_list(metplus_config, conf_items, met_tool, expected_result): config = metplus_config() for conf_key, conf_value in conf_items.items(): config.set('config', conf_key, conf_value) - assert(config_metplus.get_custom_string_list(config, met_tool) == expected_result) + assert config_metplus.get_custom_string_list(config, met_tool) == expected_result + @pytest.mark.parametrize( 'config_var_name, expected_indices, set_met_tool', [ @@ -133,6 +141,7 @@ def test_get_custom_string_list(metplus_config, conf_items, met_tool, expected_r ('BOTH_VAR12_FIELD_NAME', ['12'], False), ] ) +@pytest.mark.util def test_find_var_indices_fcst(metplus_config, config_var_name, expected_indices, @@ -145,9 +154,10 @@ def test_find_var_indices_fcst(metplus_config, data_types=data_types, met_tool=met_tool) - assert(len(var_name_indices) == len(expected_indices)) + assert len(var_name_indices) == len(expected_indices) for actual_index in var_name_indices: - assert(actual_index in expected_indices) + assert actual_index in expected_indices + @pytest.mark.parametrize( 'data_type, met_tool, expected_out', [ @@ -172,10 +182,12 @@ def test_find_var_indices_fcst(metplus_config, ] ) +@pytest.mark.util def test_get_field_search_prefixes(data_type, met_tool, expected_out): assert(config_metplus.get_field_search_prefixes(data_type, met_tool) == expected_out) + @pytest.mark.parametrize( 'item_list, extension, is_valid', [ (['FCST'], 'NAME', False), @@ -215,9 +227,11 @@ def test_get_field_search_prefixes(data_type, met_tool, expected_out): ] ) +@pytest.mark.util def test_is_var_item_valid(metplus_config, item_list, extension, is_valid): conf = metplus_config() - assert(config_metplus.is_var_item_valid(item_list, '1', extension, conf)[0] == is_valid) + assert config_metplus.is_var_item_valid(item_list, '1', extension, conf)[0] == is_valid + @pytest.mark.parametrize( 'item_list, configs_to_set, is_valid', [ @@ -256,12 +270,14 @@ def test_is_var_item_valid(metplus_config, item_list, extension, is_valid): ] ) +@pytest.mark.util def test_is_var_item_valid_levels(metplus_config, item_list, configs_to_set, is_valid): conf = metplus_config() for key, value in configs_to_set.items(): conf.set('config', key, value) - assert(config_metplus.is_var_item_valid(item_list, '1', 'LEVELS', conf)[0] == is_valid) + assert config_metplus.is_var_item_valid(item_list, '1', 'LEVELS', conf)[0] == is_valid + # search prefixes are valid prefixes to append to field info variables # config_overrides are a dict of config vars and their values @@ -300,6 +316,7 @@ def test_is_var_item_valid_levels(metplus_config, item_list, configs_to_set, is_ ] ) +@pytest.mark.util def test_get_field_config_variables(metplus_config, search_prefixes, config_overrides, @@ -317,7 +334,8 @@ def test_get_field_config_variables(metplus_config, index, search_prefixes) - assert(field_configs.get(field_info_type) == expected_value) + assert field_configs.get(field_info_type) == expected_value + @pytest.mark.parametrize( 'config_keys, field_key, expected_value', [ @@ -365,6 +383,7 @@ def test_get_field_config_variables(metplus_config, ([], 'output_names', None), ] ) +@pytest.mark.util def test_get_field_config_variables_synonyms(metplus_config, config_keys, field_key, @@ -379,7 +398,8 @@ def test_get_field_config_variables_synonyms(metplus_config, index, [prefix]) - assert(field_configs.get(field_key) == expected_value) + assert field_configs.get(field_key) == expected_value + # field info only 
defined in the FCST_* variables @pytest.mark.parametrize( @@ -389,6 +409,7 @@ def test_get_field_config_variables_synonyms(metplus_config, ('OBS', False), ] ) +@pytest.mark.util def test_parse_var_list_fcst_only(metplus_config, data_type, list_created): conf = metplus_config() conf.set('config', 'FCST_VAR1_NAME', "NAME1") @@ -398,7 +419,7 @@ def test_parse_var_list_fcst_only(metplus_config, data_type, list_created): # this should not occur because OBS variables are missing if config_metplus.validate_configuration_variables(conf, force_check=True)[1]: - assert(False) + assert False var_list = config_metplus.parse_var_list(conf, time_info=None, data_type=data_type) @@ -414,7 +435,8 @@ def test_parse_var_list_fcst_only(metplus_config, data_type, list_created): var_list[2]['fcst_level'] == "LEVELS21" and \ var_list[3]['fcst_level'] == "LEVELS22") else: - assert(not var_list) + assert not var_list + # field info only defined in the OBS_* variables @pytest.mark.parametrize( @@ -424,6 +446,7 @@ def test_parse_var_list_fcst_only(metplus_config, data_type, list_created): ('FCST', False), ] ) +@pytest.mark.util def test_parse_var_list_obs(metplus_config, data_type, list_created): conf = metplus_config() conf.set('config', 'OBS_VAR1_NAME', "NAME1") @@ -433,7 +456,7 @@ def test_parse_var_list_obs(metplus_config, data_type, list_created): # this should not occur because FCST variables are missing if config_metplus.validate_configuration_variables(conf, force_check=True)[1]: - assert(False) + assert False var_list = config_metplus.parse_var_list(conf, time_info=None, data_type=data_type) @@ -449,7 +472,7 @@ def test_parse_var_list_obs(metplus_config, data_type, list_created): var_list[2]['obs_level'] == "LEVELS21" and \ var_list[3]['obs_level'] == "LEVELS22") else: - assert(not var_list) + assert not var_list # field info only defined in the BOTH_* variables @@ -460,6 +483,7 @@ def test_parse_var_list_obs(metplus_config, data_type, list_created): ('OBS', 'obs'), ] ) +@pytest.mark.util def test_parse_var_list_both(metplus_config, data_type, list_created): conf = metplus_config() conf.set('config', 'BOTH_VAR1_NAME', "NAME1") @@ -469,7 +493,7 @@ def test_parse_var_list_both(metplus_config, data_type, list_created): # this should not occur because BOTH variables are used if not config_metplus.validate_configuration_variables(conf, force_check=True)[1]: - assert(False) + assert False var_list = config_metplus.parse_var_list(conf, time_info=None, data_type=data_type) print(f'var_list:{var_list}') @@ -482,9 +506,11 @@ def test_parse_var_list_both(metplus_config, data_type, list_created): not var_list[1][f'{list_to_check}_level'] == "LEVELS12" or \ not var_list[2][f'{list_to_check}_level'] == "LEVELS21" or \ not var_list[3][f'{list_to_check}_level'] == "LEVELS22": - assert(False) + assert False + # field info defined in both FCST_* and OBS_* variables +@pytest.mark.util def test_parse_var_list_fcst_and_obs(metplus_config): conf = metplus_config() conf.set('config', 'FCST_VAR1_NAME', "FNAME1") @@ -498,7 +524,7 @@ def test_parse_var_list_fcst_and_obs(metplus_config): # this should not occur because FCST and OBS variables are found if not config_metplus.validate_configuration_variables(conf, force_check=True)[1]: - assert(False) + assert False var_list = config_metplus.parse_var_list(conf) @@ -519,7 +545,9 @@ def test_parse_var_list_fcst_and_obs(metplus_config): var_list[3]['fcst_level'] == "FLEVELS22" and \ var_list[3]['obs_level'] == "OLEVELS22") + # VAR1 defined by FCST, VAR2 defined by OBS +@pytest.mark.util 
def test_parse_var_list_fcst_and_obs_alternate(metplus_config): conf = metplus_config() conf.set('config', 'FCST_VAR1_NAME', "FNAME1") @@ -530,6 +558,7 @@ def test_parse_var_list_fcst_and_obs_alternate(metplus_config): # configuration is invalid and parse var list should not give any results assert(not config_metplus.validate_configuration_variables(conf, force_check=True)[1] and not config_metplus.parse_var_list(conf)) + # VAR1 defined by OBS, VAR2 by FCST, VAR3 by both FCST AND OBS @pytest.mark.parametrize( 'data_type, list_len, name_levels', [ @@ -538,6 +567,7 @@ def test_parse_var_list_fcst_and_obs_alternate(metplus_config): ('OBS', 4, ('ONAME1:OLEVELS11','ONAME1:OLEVELS12','ONAME3:OLEVELS31','ONAME3:OLEVELS32')), ] ) +@pytest.mark.util def test_parse_var_list_fcst_and_obs_and_both(metplus_config, data_type, list_len, name_levels): conf = metplus_config() conf.set('config', 'OBS_VAR1_NAME', "ONAME1") @@ -551,15 +581,15 @@ def test_parse_var_list_fcst_and_obs_and_both(metplus_config, data_type, list_le # configuration is invalid and parse var list should not give any results if config_metplus.validate_configuration_variables(conf, force_check=True)[1]: - assert(False) + assert False var_list = config_metplus.parse_var_list(conf, time_info=None, data_type=data_type) if len(var_list) != list_len: - assert(False) + assert False if data_type is None: - assert(len(var_list) == 0) + assert len(var_list) == 0 if name_levels is not None: dt_lower = data_type.lower() @@ -571,12 +601,13 @@ def test_parse_var_list_fcst_and_obs_and_both(metplus_config, data_type, list_le for expect, reality in zip(expected,var_list): if expect[f'{dt_lower}_name'] != reality[f'{dt_lower}_name']: - assert(False) + assert False if expect[f'{dt_lower}_level'] != reality[f'{dt_lower}_level']: - assert(False) + assert False + + assert True - assert(True) # option defined in obs only @pytest.mark.parametrize( @@ -586,6 +617,7 @@ def test_parse_var_list_fcst_and_obs_and_both(metplus_config, data_type, list_le ('OBS', 0), ] ) +@pytest.mark.util def test_parse_var_list_fcst_only_options(metplus_config, data_type, list_len): conf = metplus_config() conf.set('config', 'FCST_VAR1_NAME', "NAME1") @@ -595,11 +627,12 @@ def test_parse_var_list_fcst_only_options(metplus_config, data_type, list_len): # this should not occur because OBS variables are missing if config_metplus.validate_configuration_variables(conf, force_check=True)[1]: - assert(False) + assert False var_list = config_metplus.parse_var_list(conf, time_info=None, data_type=data_type) - assert(len(var_list) == list_len) + assert len(var_list) == list_len + @pytest.mark.parametrize( 'met_tool, indices', [ @@ -608,6 +641,7 @@ def test_parse_var_list_fcst_only_options(metplus_config, data_type, list_len): ('ENSEMBLE_STAT', {}), ] ) +@pytest.mark.util def test_find_var_indices_wrapper_specific(metplus_config, met_tool, indices): conf = metplus_config() data_type = 'FCST' @@ -617,11 +651,13 @@ def test_find_var_indices_wrapper_specific(metplus_config, met_tool, indices): var_name_indices = config_metplus.find_var_name_indices(conf, data_types=[data_type], met_tool=met_tool) - assert(var_name_indices == indices) + assert var_name_indices == indices + # ensure that the field configuration used for # met_tool_wrapper/EnsembleStat/EnsembleStat.conf # works as expected +@pytest.mark.util def test_parse_var_list_ensemble(metplus_config): config = metplus_config() config.set('config', 'ENS_VAR1_NAME', 'APCP') @@ -704,13 +740,15 @@ def test_parse_var_list_ensemble(metplus_config): 
assert(len(ensemble_var_list) == len(expected_ens_list)) for actual_ens, expected_ens in zip(ensemble_var_list, expected_ens_list): for key, value in expected_ens.items(): - assert(actual_ens.get(key) == value) + assert actual_ens.get(key) == value assert(len(var_list) == len(expected_var_list)) for actual_var, expected_var in zip(var_list, expected_var_list): for key, value in expected_var.items(): - assert(actual_var.get(key) == value) + assert actual_var.get(key) == value + +@pytest.mark.util def test_parse_var_list_series_by(metplus_config): config = metplus_config() config.set('config', 'BOTH_EXTRACT_TILES_VAR1_NAME', 'RH') @@ -768,16 +806,18 @@ def test_parse_var_list_series_by(metplus_config): print(f'SeriesAnalysis var list:') pp.pprint(actual_sa_list) - assert(len(actual_et_list) == len(expected_et_list)) + assert len(actual_et_list) == len(expected_et_list) for actual_et, expected_et in zip(actual_et_list, expected_et_list): for key, value in expected_et.items(): - assert(actual_et.get(key) == value) + assert actual_et.get(key) == value assert(len(actual_sa_list) == len(expected_sa_list)) for actual_sa, expected_sa in zip(actual_sa_list, expected_sa_list): for key, value in expected_sa.items(): - assert(actual_sa.get(key) == value) + assert actual_sa.get(key) == value + +@pytest.mark.util def test_parse_var_list_priority_fcst(metplus_config): priority_list = ['FCST_GRID_STAT_VAR1_NAME', 'FCST_GRID_STAT_VAR1_INPUT_FIELD_NAME', @@ -806,13 +846,15 @@ def test_parse_var_list_priority_fcst(metplus_config): data_type='FCST', met_tool='grid_stat') - assert(len(var_list) == 1) - assert(var_list[0].get('fcst_name') == priority_list[0].lower()) + assert len(var_list) == 1 + assert var_list[0].get('fcst_name') == priority_list[0].lower() priority_list.pop(0) + # test that if wrapper specific field info is specified, it only gets # values from that list. 
All generic values should be read if no # wrapper specific field info variables are specified +@pytest.mark.util def test_parse_var_list_wrapper_specific(metplus_config): conf = metplus_config() conf.set('config', 'FCST_VAR1_NAME', "ENAME1") @@ -846,6 +888,7 @@ def test_parse_var_list_wrapper_specific(metplus_config): g_var_list[0]['fcst_level'] == "GLEVELS11" and g_var_list[1]['fcst_level'] == "GLEVELS12") + @pytest.mark.parametrize( 'config_overrides, expected_results', [ # 2 levels @@ -896,6 +939,7 @@ def test_parse_var_list_wrapper_specific(metplus_config): ]), ] ) +@pytest.mark.util def test_parse_var_list_py_embed_multi_levels(metplus_config, config_overrides, expected_results): config = metplus_config() @@ -906,19 +950,19 @@ def test_parse_var_list_py_embed_multi_levels(metplus_config, config_overrides, var_list = config_metplus.parse_var_list(config, time_info=time_info, data_type=None) - assert(len(var_list) == len(expected_results)) + assert len(var_list) == len(expected_results) for var_item, expected_result in zip(var_list, expected_results): - assert(var_item['fcst_name'] == expected_result) + assert var_item['fcst_name'] == expected_result # run again with data type specified var_list = config_metplus.parse_var_list(config, time_info=time_info, data_type='FCST') - assert(len(var_list) == len(expected_results)) + assert len(var_list) == len(expected_results) for var_item, expected_result in zip(var_list, expected_results): - assert(var_item['fcst_name'] == expected_result) + assert var_item['fcst_name'] == expected_result @pytest.mark.parametrize( @@ -955,12 +999,14 @@ def test_parse_var_list_py_embed_multi_levels(metplus_config, config_overrides, ('StatAnalysis, MakePlots', ['StatAnalysis']), ] ) +@pytest.mark.util def test_get_process_list(metplus_config, input_list, expected_list): conf = metplus_config() conf.set('config', 'PROCESS_LIST', input_list) process_list = config_metplus.get_process_list(conf) output_list = [item[0] for item in process_list] - assert(output_list == expected_list) + assert output_list == expected_list + @pytest.mark.parametrize( 'input_list, expected_list', [ @@ -987,12 +1033,15 @@ def test_get_process_list(metplus_config, input_list, expected_list): ('TCStat', 'two')]), ] ) +@pytest.mark.util def test_get_process_list_instances(metplus_config, input_list, expected_list): conf = metplus_config() conf.set('config', 'PROCESS_LIST', input_list) output_list = config_metplus.get_process_list(conf) - assert(output_list == expected_list) + assert output_list == expected_list + +@pytest.mark.util def test_getraw_sub_and_nosub(metplus_config): raw_string = '{MODEL}_{CURRENT_FCST_NAME}' sub_actual = 'FCST_NAME' @@ -1007,6 +1056,8 @@ def test_getraw_sub_and_nosub(metplus_config): sub_value = config.getraw('config', 'OUTPUT_PREFIX', sub_vars=True) assert sub_value == sub_actual + +@pytest.mark.util def test_getraw_instance_with_unset_var(metplus_config): """! 
Replicates bug where CURRENT_FCST_NAME is substituted with an empty string when copied from an instance section diff --git a/internal_tests/pytests/logging/test_logging.py b/internal_tests/pytests/util/logging/test_logging.py similarity index 79% rename from internal_tests/pytests/logging/test_logging.py rename to internal_tests/pytests/util/logging/test_logging.py index 7e31851c3a..68eca3262d 100644 --- a/internal_tests/pytests/logging/test_logging.py +++ b/internal_tests/pytests/util/logging/test_logging.py @@ -1,26 +1,13 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 + +import pytest import logging import re import os -import pytest - -# -# -----------Mandatory----------- -# configuration and fixture to support METplus configuration files beyond -# the metplus_data, metplus_system, and metplus_runtime conf files. -# - - -# Add a test configuration -def pytest_addoption(parser): - parser.addoption("-c", action="store", help=" -c ") - -# @pytest.fixture -def cmdopt(request): - return request.config.getoption("-c") +@pytest.mark.util def test_log_level(metplus_config): # Verify that the log level is set to what we indicated in the config file. config = metplus_config() @@ -30,6 +17,7 @@ def test_log_level(metplus_config): assert fixture_logger.isEnabledFor(level) +@pytest.mark.util def test_log_level_key(metplus_config): # Verify that the LOG_LEVEL key is in the config file config_instance = metplus_config() @@ -38,6 +26,7 @@ def test_log_level_key(metplus_config): assert config_instance.has_option(section, option) +@pytest.mark.util def test_logdir_exists(metplus_config): # Verify that the expected log dir exists. config = metplus_config() @@ -47,6 +36,7 @@ def test_logdir_exists(metplus_config): assert os.path.exists(log_dir) +@pytest.mark.util def test_logfile_exists(metplus_config): # Verify that a logfile with format metplus.log exists # We are assuming that there can be numerous files in the log directory. 
@@ -67,9 +57,3 @@ def test_logfile_exists(metplus_config): else: # There is no log directory assert False - - - - - - diff --git a/internal_tests/pytests/met_config/test_met_config.py b/internal_tests/pytests/util/met_config/test_met_config.py similarity index 96% rename from internal_tests/pytests/met_config/test_met_config.py rename to internal_tests/pytests/util/met_config/test_met_config.py index 0f990e678b..0f3adb6587 100644 --- a/internal_tests/pytests/met_config/test_met_config.py +++ b/internal_tests/pytests/util/met_config/test_met_config.py @@ -6,6 +6,7 @@ from metplus.util.met_config import _read_climo_file_name, _read_climo_field from metplus.util import CLIMO_TYPES + @pytest.mark.parametrize( 'config_overrides, expected_value', [ # 0 no relevant config set @@ -30,6 +31,7 @@ '{ name="TMP"; level="(*,*)"; }'), ] ) +@pytest.mark.util def test_read_climo_field(metplus_config, config_overrides, expected_value): app_name = 'app' for climo_type in ('MEAN', 'STDEV'): @@ -45,6 +47,7 @@ def test_read_climo_field(metplus_config, config_overrides, expected_value): _read_climo_field(climo_type, config, app_name) assert config.getraw('config', expected_var) == expected_value + @pytest.mark.parametrize( 'config_overrides, expected_value', [ # 0 no relevant config set @@ -127,6 +130,7 @@ def test_read_climo_field(metplus_config, config_overrides, expected_value): 'hour_interval = 12;}')), ] ) +@pytest.mark.util def test_handle_climo_dict(metplus_config, config_overrides, expected_value): app_name = 'app' for climo_type in ('MEAN', 'STDEV'): @@ -145,27 +149,30 @@ def test_handle_climo_dict(metplus_config, config_overrides, expected_value): expected_sub = expected_value.replace('', climo_type.lower()) assert output_dict[expected_var] == expected_sub + @pytest.mark.parametrize( 'name, data_type, mp_configs, extra_args', [ ('beg', 'int', 'BEG', None), ('end', 'int', ['END'], None), ] ) +@pytest.mark.util def test_met_config_info(name, data_type, mp_configs, extra_args): item = METConfig(name=name, data_type=data_type) item.metplus_configs = mp_configs item.extra_args = extra_args - assert(item.name == name) - assert(item.data_type == data_type) + assert item.name == name + assert item.data_type == data_type if isinstance(mp_configs, list): - assert(item.metplus_configs == mp_configs) + assert item.metplus_configs == mp_configs else: - assert(item.metplus_configs == [mp_configs]) + assert item.metplus_configs == [mp_configs] if not extra_args: - assert(item.extra_args == {}) + assert item.extra_args == {} + @pytest.mark.parametrize( 'data_type, expected_function', [ @@ -178,11 +185,12 @@ def test_met_config_info(name, data_type, mp_configs, extra_args): ('bad_name', None), ] ) +@pytest.mark.util def test_set_met_config_function(data_type, expected_function): try: function_found = set_met_config_function(data_type) function_name = function_found.__name__ if function_found else None - assert(function_name == expected_function) + assert function_name == expected_function except ValueError: assert expected_function is None @@ -196,9 +204,11 @@ def test_set_met_config_function(data_type, expected_function): ('G002', '"G002"'), ] ) +@pytest.mark.util def test_format_regrid_to_grid(input, output): assert format_regrid_to_grid(input) == output + @pytest.mark.parametrize( 'config_overrides, expected_value', [ # 0 no climo variables set @@ -232,6 +242,7 @@ def test_format_regrid_to_grid(input, output): 'PYTHON_XARRAY'), ] ) +@pytest.mark.util def test_read_climo_file_name(metplus_config, config_overrides, 
expected_value): # name of app used for testing to read/set config variables diff --git a/internal_tests/pytests/met_util/test_met_util.py b/internal_tests/pytests/util/met_util/test_met_util.py similarity index 91% rename from internal_tests/pytests/met_util/test_met_util.py rename to internal_tests/pytests/util/met_util/test_met_util.py index ccc7d6bcd9..8241ea4528 100644 --- a/internal_tests/pytests/met_util/test_met_util.py +++ b/internal_tests/pytests/util/met_util/test_met_util.py @@ -1,16 +1,17 @@ #!/usr/bin/env python3 -import sys +import pytest + import datetime import os from dateutil.relativedelta import relativedelta import pprint -import pytest from metplus.util import met_util as util from metplus.util import time_util from metplus.util.config_metplus import parse_var_list + @pytest.mark.parametrize( 'key, value', [ ({"gt2.3", "gt5.5"}, True), @@ -43,14 +44,15 @@ ([">SFP70", ">SFP80", ">SFP90", ">SFP95"], True), ] ) +@pytest.mark.util def test_threshold(key, value): - assert(util.validate_thresholds(key) == value) + assert util.validate_thresholds(key) == value + # parses a threshold and returns a list of tuples of # comparison and number, i.e.: # 'gt4' => [('gt', 4)] # gt4&<5 => [('gt', 4), ('lt', 5)] - @pytest.mark.parametrize( 'key, value', [ ('gt4', [('gt', 4)]), @@ -81,8 +83,10 @@ def test_threshold(key, value): ("1', 'gt1'), @@ -223,13 +171,16 @@ def test_set_lists_as_loop_or_group(metplus_config): 'lt805,lt1609,lt4828,lt8045,ge8045,lt16090'), ] ) +@pytest.mark.plotting def test_format_thresh(metplus_config, expression, expected_result): - # Idependently test the creation of + # Independently test the creation of # string values for defining thresholds st = stat_analysis_wrapper(metplus_config) - assert(st.format_thresh(expression) == expected_result) + assert st.format_thresh(expression) == expected_result + +@pytest.mark.plotting def test_build_stringsub_dict(metplus_config): # Independently test the building of # the dictionary used in the stringtemplate @@ -431,7 +382,9 @@ def test_build_stringsub_dict(metplus_config): datetime.datetime(1900, 1, 1, 0, 0, 0)) assert(test_stringsub_dict['obs_init_hour_end'] == datetime.datetime(1900, 1, 1, 23, 59 ,59)) - + + +@pytest.mark.plotting def test_get_output_filename(metplus_config): # Independently test the building of # the output file name @@ -488,7 +441,7 @@ def test_get_output_filename(metplus_config): lists_to_loop, lists_to_group, config_dict) - assert(expected_output_filename == test_output_filename) + assert expected_output_filename == test_output_filename # Test 2 expected_output_filename = ( 'MODEL_TEST_MODEL_TEST_ANL_' @@ -508,7 +461,7 @@ def test_get_output_filename(metplus_config): lists_to_loop, lists_to_group, config_dict) - assert(expected_output_filename == test_output_filename) + assert expected_output_filename == test_output_filename # Test 3 expected_output_filename = ( 'MODEL_TEST_MODEL_TEST_ANL' @@ -528,7 +481,7 @@ def test_get_output_filename(metplus_config): lists_to_loop, lists_to_group, config_dict) - assert(expected_output_filename == test_output_filename) + assert expected_output_filename == test_output_filename # Test 4 expected_output_filename = ( 'MODEL_TEST_MODEL_TEST_ANL' @@ -546,8 +499,10 @@ def test_get_output_filename(metplus_config): lists_to_loop, lists_to_group, config_dict) - assert(expected_output_filename == test_output_filename) + assert expected_output_filename == test_output_filename + +@pytest.mark.plotting def test_get_lookin_dir(metplus_config): # Independently test the 
building of # the lookin directory @@ -593,48 +548,44 @@ def test_get_lookin_dir(metplus_config): 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', 'LINE_TYPE_LIST' ] lists_to_loop = [ 'FCST_VALID_HOUR_LIST', 'MODEL_LIST' ] - stat_analysis_pytest_dir = os.path.dirname(__file__) + pytest_data_dir = os.path.join(os.path.dirname(__file__), os.pardir, + os.pardir, os.pardir, 'data') # Test 1 - expected_lookin_dir = os.path.join(stat_analysis_pytest_dir, - '../../data/fake/20180201') - dir_path = os.path.join(stat_analysis_pytest_dir, - '../../data/fake/*') + expected_lookin_dir = os.path.join(pytest_data_dir, 'fake/20180201') + dir_path = os.path.join(pytest_data_dir, 'fake/*') test_lookin_dir = st.get_lookin_dir(dir_path, lists_to_loop, lists_to_group, config_dict) - assert(expected_lookin_dir == test_lookin_dir) + assert expected_lookin_dir == test_lookin_dir # Test 2 - expected_lookin_dir = os.path.join(stat_analysis_pytest_dir, - '../../data/fake/20180201') - dir_path = os.path.join(stat_analysis_pytest_dir, - '../../data/fake/{valid?fmt=%Y%m%d}') + expected_lookin_dir = os.path.join(pytest_data_dir, 'fake/20180201') + dir_path = os.path.join(pytest_data_dir, 'fake/{valid?fmt=%Y%m%d}') test_lookin_dir = st.get_lookin_dir(dir_path, lists_to_loop, lists_to_group, config_dict) - assert(expected_lookin_dir == test_lookin_dir) + assert expected_lookin_dir == test_lookin_dir # Test 3 - no matches for lookin dir wildcard expected_lookin_dir = '' - dir_path = os.path.join(stat_analysis_pytest_dir, - '../../data/fake/*nothingmatches*') + dir_path = os.path.join(pytest_data_dir, 'fake/*nothingmatches*') test_lookin_dir = st.get_lookin_dir(dir_path, lists_to_loop, lists_to_group, config_dict) - assert(expected_lookin_dir == test_lookin_dir) + assert expected_lookin_dir == test_lookin_dir # Test 4 - 2 paths, one with wildcard - expected_lookin_dir = os.path.join(stat_analysis_pytest_dir, - '../../data/fake/20180201') + expected_lookin_dir = os.path.join(pytest_data_dir, 'fake/20180201') expected_lookin_dir = f'{expected_lookin_dir} {expected_lookin_dir}' - dir_path = os.path.join(stat_analysis_pytest_dir, - '../../data/fake/*') + dir_path = os.path.join(pytest_data_dir, 'fake/*') dir_path = f'{dir_path}, {dir_path}' test_lookin_dir = st.get_lookin_dir(dir_path, lists_to_loop, lists_to_group, config_dict) - assert(expected_lookin_dir == test_lookin_dir) + assert expected_lookin_dir == test_lookin_dir + +@pytest.mark.plotting def test_format_valid_init(metplus_config): # Independently test the formatting # of the valid and initialization date and hours @@ -653,18 +604,18 @@ def test_format_valid_init(metplus_config): config_dict['OBS_VALID_HOUR'] = '' config_dict['OBS_INIT_HOUR'] = '' config_dict = st.format_valid_init(config_dict) - assert(config_dict['FCST_VALID_BEG'] == '20190101_000000') - assert(config_dict['FCST_VALID_END'] == '20190105_000000') - assert(config_dict['FCST_VALID_HOUR'] == '"000000"') - assert(config_dict['FCST_INIT_BEG'] == '') - assert(config_dict['FCST_INIT_END'] == '') - assert(config_dict['FCST_INIT_HOUR'] == '"000000", "120000"') - assert(config_dict['OBS_VALID_BEG'] == '') - assert(config_dict['OBS_VALID_END'] == '') - assert(config_dict['OBS_VALID_HOUR'] == '') - assert(config_dict['OBS_INIT_BEG'] == '') - assert(config_dict['OBS_INIT_END'] == '') - assert(config_dict['OBS_INIT_HOUR'] == '') + assert config_dict['FCST_VALID_BEG'] == '20190101_000000' + assert config_dict['FCST_VALID_END'] == '20190105_000000' + assert config_dict['FCST_VALID_HOUR'] == '"000000"' + assert 
config_dict['FCST_INIT_BEG'] == '' + assert config_dict['FCST_INIT_END'] == '' + assert config_dict['FCST_INIT_HOUR'] == '"000000", "120000"' + assert config_dict['OBS_VALID_BEG'] == '' + assert config_dict['OBS_VALID_END'] == '' + assert config_dict['OBS_VALID_HOUR'] == '' + assert config_dict['OBS_INIT_BEG'] == '' + assert config_dict['OBS_INIT_END'] == '' + assert config_dict['OBS_INIT_HOUR'] == '' # Test 2 st.c_dict['DATE_BEG'] = '20190101' st.c_dict['DATE_END'] = '20190105' @@ -676,18 +627,18 @@ def test_format_valid_init(metplus_config): config_dict['OBS_VALID_HOUR'] = '' config_dict['OBS_INIT_HOUR'] = '' config_dict = st.format_valid_init(config_dict) - assert(config_dict['FCST_VALID_BEG'] == '20190101_000000') - assert(config_dict['FCST_VALID_END'] == '20190105_120000') - assert(config_dict['FCST_VALID_HOUR'] == '"000000", "120000"') - assert(config_dict['FCST_INIT_BEG'] == '') - assert(config_dict['FCST_INIT_END'] == '') - assert(config_dict['FCST_INIT_HOUR'] == '"000000", "120000"') - assert(config_dict['OBS_VALID_BEG'] == '') - assert(config_dict['OBS_VALID_END'] == '') - assert(config_dict['OBS_VALID_HOUR'] == '') - assert(config_dict['OBS_INIT_BEG'] == '') - assert(config_dict['OBS_INIT_END'] == '') - assert(config_dict['OBS_INIT_HOUR'] == '') + assert config_dict['FCST_VALID_BEG'] == '20190101_000000' + assert config_dict['FCST_VALID_END'] == '20190105_120000' + assert config_dict['FCST_VALID_HOUR'] == '"000000", "120000"' + assert config_dict['FCST_INIT_BEG'] == '' + assert config_dict['FCST_INIT_END'] == '' + assert config_dict['FCST_INIT_HOUR'] == '"000000", "120000"' + assert config_dict['OBS_VALID_BEG'] == '' + assert config_dict['OBS_VALID_END'] == '' + assert config_dict['OBS_VALID_HOUR'] == '' + assert config_dict['OBS_INIT_BEG'] == '' + assert config_dict['OBS_INIT_END'] == '' + assert config_dict['OBS_INIT_HOUR'] == '' # Test 3 st.c_dict['DATE_BEG'] = '20190101' st.c_dict['DATE_END'] = '20190101' @@ -699,18 +650,18 @@ def test_format_valid_init(metplus_config): config_dict['OBS_VALID_HOUR'] = '000000' config_dict['OBS_INIT_HOUR'] = '"000000", "120000"' config_dict = st.format_valid_init(config_dict) - assert(config_dict['FCST_VALID_BEG'] == '') - assert(config_dict['FCST_VALID_END'] == '') - assert(config_dict['FCST_VALID_HOUR'] == '') - assert(config_dict['FCST_INIT_BEG'] == '') - assert(config_dict['FCST_INIT_END'] == '') - assert(config_dict['FCST_INIT_HOUR'] == '') - assert(config_dict['OBS_VALID_BEG'] == '20190101_000000') - assert(config_dict['OBS_VALID_END'] == '20190101_000000') - assert(config_dict['OBS_VALID_HOUR'] == '"000000"') - assert(config_dict['OBS_INIT_BEG'] == '') - assert(config_dict['OBS_INIT_END'] == '') - assert(config_dict['OBS_INIT_HOUR'] == '"000000", "120000"') + assert config_dict['FCST_VALID_BEG'] == '' + assert config_dict['FCST_VALID_END'] == '' + assert config_dict['FCST_VALID_HOUR'] == '' + assert config_dict['FCST_INIT_BEG'] == '' + assert config_dict['FCST_INIT_END'] == '' + assert config_dict['FCST_INIT_HOUR'] == '' + assert config_dict['OBS_VALID_BEG'] == '20190101_000000' + assert config_dict['OBS_VALID_END'] == '20190101_000000' + assert config_dict['OBS_VALID_HOUR'] == '"000000"' + assert config_dict['OBS_INIT_BEG'] == '' + assert config_dict['OBS_INIT_END'] == '' + assert config_dict['OBS_INIT_HOUR'] == '"000000", "120000"' # Test 3 st.c_dict['DATE_BEG'] = '20190101' st.c_dict['DATE_END'] = '20190101' @@ -722,19 +673,21 @@ def test_format_valid_init(metplus_config): config_dict['OBS_VALID_HOUR'] = '000000' 
config_dict['OBS_INIT_HOUR'] = '"000000", "120000"' config_dict = st.format_valid_init(config_dict) - assert(config_dict['FCST_VALID_BEG'] == '') - assert(config_dict['FCST_VALID_END'] == '') - assert(config_dict['FCST_VALID_HOUR'] == '') - assert(config_dict['FCST_INIT_BEG'] == '') - assert(config_dict['FCST_INIT_END'] == '') - assert(config_dict['FCST_INIT_HOUR'] == '') - assert(config_dict['OBS_VALID_BEG'] == '') - assert(config_dict['OBS_VALID_END'] == '') - assert(config_dict['OBS_VALID_HOUR'] == '"000000"') - assert(config_dict['OBS_INIT_BEG'] == '20190101_000000') - assert(config_dict['OBS_INIT_END'] == '20190101_120000') - assert(config_dict['OBS_INIT_HOUR'] == '"000000", "120000"') - + assert config_dict['FCST_VALID_BEG'] == '' + assert config_dict['FCST_VALID_END'] == '' + assert config_dict['FCST_VALID_HOUR'] == '' + assert config_dict['FCST_INIT_BEG'] == '' + assert config_dict['FCST_INIT_END'] == '' + assert config_dict['FCST_INIT_HOUR'] == '' + assert config_dict['OBS_VALID_BEG'] == '' + assert config_dict['OBS_VALID_END'] == '' + assert config_dict['OBS_VALID_HOUR'] == '"000000"' + assert config_dict['OBS_INIT_BEG'] == '20190101_000000' + assert config_dict['OBS_INIT_END'] == '20190101_120000' + assert config_dict['OBS_INIT_HOUR'] == '"000000", "120000"' + + +@pytest.mark.plotting def test_parse_model_info(metplus_config): # Independently test the creation of # the model information dictionary @@ -759,19 +712,16 @@ def test_parse_model_info(metplus_config): expected_out_stat_filename_type = 'user' test_model_info_list = st.parse_model_info() - assert(test_model_info_list[0]['name'] == expected_name) - assert(test_model_info_list[0]['reference_name'] == - expected_reference_name) - assert(test_model_info_list[0]['obtype'] == expected_obtype) - assert(test_model_info_list[0]['dump_row_filename_template'] == - expected_dump_row_filename_template) - assert(test_model_info_list[0]['dump_row_filename_type'] == - expected_dump_row_filename_type) - assert(test_model_info_list[0]['out_stat_filename_template'] == - expected_out_stat_filename_template) - assert(test_model_info_list[0]['out_stat_filename_type'] == - expected_out_stat_filename_type) + assert test_model_info_list[0]['name'] == expected_name + assert test_model_info_list[0]['reference_name'] == expected_reference_name + assert test_model_info_list[0]['obtype'] == expected_obtype + assert test_model_info_list[0]['dump_row_filename_template'] == expected_dump_row_filename_template + assert test_model_info_list[0]['dump_row_filename_type'] == expected_dump_row_filename_type + assert test_model_info_list[0]['out_stat_filename_template'] == expected_out_stat_filename_template + assert test_model_info_list[0]['out_stat_filename_type'] == expected_out_stat_filename_type + +@pytest.mark.plotting def test_run_stat_analysis(metplus_config): # Test running of stat_analysis st = stat_analysis_wrapper(metplus_config) @@ -784,9 +734,9 @@ def test_run_stat_analysis(metplus_config): st.c_dict['DATE_END'] = '20190101' st.c_dict['DATE_TYPE'] = 'VALID' st.run_stat_analysis() - assert(os.path.exists(expected_filename)) - assert(os.path.getsize(expected_filename) - == os.path.getsize(comparison_filename)) + assert os.path.exists(expected_filename) + assert os.path.getsize(expected_filename) == os.path.getsize(comparison_filename) + @pytest.mark.parametrize( 'data_type, config_list, expected_list', [ @@ -800,14 +750,17 @@ def test_run_stat_analysis(metplus_config): ('OBS', '\"(0,*,*)\", \"(1,*,*)\"', ["0,*,*", "1,*,*"]), ] ) 
+@pytest.mark.plotting def test_get_level_list(metplus_config, data_type, config_list, expected_list): config = metplus_config() config.set('config', f'{data_type}_LEVEL_LIST', config_list) saw = StatAnalysisWrapper(config) - assert(saw.get_level_list(data_type) == expected_list) + assert saw.get_level_list(data_type) == expected_list + +@pytest.mark.plotting def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' config = metplus_config() diff --git a/internal_tests/pytests/stat_analysis/test_stat_analysis_plotting.py b/internal_tests/pytests/wrappers/stat_analysis/test_stat_analysis_plotting.py similarity index 77% rename from internal_tests/pytests/stat_analysis/test_stat_analysis_plotting.py rename to internal_tests/pytests/wrappers/stat_analysis/test_stat_analysis_plotting.py index 01ed2be651..e869b5d580 100644 --- a/internal_tests/pytests/stat_analysis/test_stat_analysis_plotting.py +++ b/internal_tests/pytests/wrappers/stat_analysis/test_stat_analysis_plotting.py @@ -1,48 +1,17 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 -import os -import datetime -import sys -import logging import pytest -import datetime -import glob - -import produtil.setup -from metplus.wrappers.stat_analysis_wrapper import StatAnalysisWrapper -from metplus.util import met_util as util +import os -# -# These are tests (not necessarily unit tests) for the -# MET stat_analysis wrapper, stat_analysis_wrapper.py -# NOTE: This test requires pytest, which is NOT part of the standard Python -# library. -# These tests require one configuration file in addition to the three -# required METplus configuration files: test_stat_analysis.conf. This contains -# the information necessary for running all the tests. Each test can be -# customized to replace various settings if needed. -# +import glob -# -# -----------Mandatory----------- -# configuration and fixture to support METplus configuration files beyond -# the metplus_data, metplus_system, and metplus_runtime conf files. -# +from metplus.wrappers.stat_analysis_wrapper import StatAnalysisWrapper +from metplus.util import handle_tmp_dir +METPLUS_BASE = os.getcwd().split('/internal_tests')[0] -# Add a test configuration -def pytest_addoption(parser): - parser.addoption("-c", action="store", help=" -c ") -# @pytest.fixture -def cmdopt(request): - return request.config.getoption("-c") - -# -# ------------Pytest fixtures that can be used for all tests --------------- -# -#@pytest.fixture def stat_analysis_wrapper(metplus_config): """! Returns a default StatAnalysisWrapper with /path/to entries in the metplus_system.conf and metplus_runtime.conf configuration @@ -54,39 +23,11 @@ def stat_analysis_wrapper(metplus_config): extra_configs = [] extra_configs.append(os.path.join(os.path.dirname(__file__), 'test_plotting.conf')) config = metplus_config(extra_configs) - util.handle_tmp_dir(config) + handle_tmp_dir(config) return StatAnalysisWrapper(config) -# ------------------TESTS GO BELOW --------------------------- -# - -#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
-# To test numerous files for filesize, use parametrization: -# @pytest.mark.parametrize( -# 'key, value', [ -# ('/usr/local/met-6.1/bin/point_stat', 382180), -# ('/usr/local/met-6.1/bin/stat_analysis', 3438944), -# ('/usr/local/met-6.1/bin/pb2nc', 3009056) -# -# ] -# ) -# def test_file_sizes(key, value): -# st = stat_analysis_wrapper() -# # Retrieve the value of the class attribute that corresponds -# # to the key in the parametrization -# files_in_dir = [] -# for dirpath, dirnames, files in os.walk("/usr/local/met-6.1/bin"): -# for name in files: -# files_in_dir.append(os.path.join(dirpath, name)) -# if actual_key in files_in_dir: -# # The actual_key is one of the files of interest we retrieved from -# # the output directory. Verify that it's file size is what we -# # expected. -# assert actual_key == key -#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -METPLUS_BASE = os.getcwd().split('/internal_tests')[0] - +@pytest.mark.plotting def test_set_lists_as_loop_or_group(metplus_config): # Independently test that the lists that are set # in the config file are being set @@ -148,6 +89,7 @@ def test_set_lists_as_loop_or_group(metplus_config): for elem in test_lists_to_loop_items)) +@pytest.mark.plotting def test_get_output_filename(metplus_config): # Independently test the building of # the output file name @@ -218,66 +160,10 @@ def test_get_output_filename(metplus_config): lists_to_loop, lists_to_group, config_dict) - assert (expected_output_filename == test_output_filename) - + assert expected_output_filename == test_output_filename -def test_parse_model_info(metplus_config): - pytest.skip("This function will be removed from MakePlots") - # Independently test the creation of - # the model information dictionary - # and the reading from the config file - # are as expected - st = stat_analysis_wrapper(metplus_config) - # Test 1 - expected_name1 = 'MODEL_TEST1' - expected_reference_name1 = 'MODEL_TEST1' - expected_obtype1 = 'MODEL_TEST1_ANL' - expected_dump_row_filename_template1 = ( - '{model?fmt=%s}_{obtype?fmt=%s}_valid{valid_beg?fmt=%Y%m%d}' - 'to{valid_end?fmt=%Y%m%d}_valid{valid_hour_beg?fmt=%H%M}to' - '{valid_hour_end?fmt=%H%M}Z_init{init_hour_beg?fmt=%H%M}to' - '{init_hour_end?fmt=%H%M}Z_fcst_lead{fcst_lead?fmt=%s}_' - 'fcst{fcst_var?fmt=%s}{fcst_level?fmt=%s}{fcst_thresh?fmt=%s}' - '{interp_mthd?fmt=%s}_obs{obs_var?fmt=%s}{obs_level?fmt=%s}' - '{obs_thresh?fmt=%s}{interp_mthd?fmt=%s}_vxmask{vx_mask?fmt=%s}' - '_dump_row.stat' - ) - expected_dump_row_filename_type1 = 'user' - expected_out_stat_filename_template1 = 'NA' - expected_out_stat_filename_type1 = 'NA' - expected_name2 = 'TEST2_MODEL' - expected_reference_name2 = 'TEST2_MODEL' - expected_obtype2 = 'ANLYS2' - expected_dump_row_filename_template2 = expected_dump_row_filename_template1 - expected_dump_row_filename_type2 = 'user' - expected_out_stat_filename_template2 = 'NA' - expected_out_stat_filename_type2 = 'NA' - test_model_info_list = st.parse_model_info() - assert (test_model_info_list[0]['name'] == expected_name1) - assert (test_model_info_list[0]['reference_name'] == - expected_reference_name1) - assert (test_model_info_list[0]['obtype'] == expected_obtype1) - assert (test_model_info_list[0]['dump_row_filename_template'] == - expected_dump_row_filename_template1) - assert (test_model_info_list[0]['dump_row_filename_type'] == - expected_dump_row_filename_type1) - assert (test_model_info_list[0]['out_stat_filename_template'] == - expected_out_stat_filename_template1) - assert 
(test_model_info_list[0]['out_stat_filename_type'] == - expected_out_stat_filename_type1) - assert (test_model_info_list[1]['name'] == expected_name2) - assert (test_model_info_list[1]['reference_name'] == - expected_reference_name2) - assert (test_model_info_list[1]['obtype'] == expected_obtype2) - assert (test_model_info_list[1]['dump_row_filename_template'] == - expected_dump_row_filename_template2) - assert (test_model_info_list[1]['dump_row_filename_type'] == - expected_dump_row_filename_type2) - assert (test_model_info_list[1]['out_stat_filename_template'] == - expected_out_stat_filename_template2) - assert (test_model_info_list[1]['out_stat_filename_type'] == - expected_out_stat_filename_type2) +@pytest.mark.plotting def test_filter_for_plotting(metplus_config): # Test running of stat_analysis st = stat_analysis_wrapper(metplus_config) @@ -510,6 +396,6 @@ def test_filter_for_plotting(metplus_config): os.listdir(st.config.getdir('OUTPUT_BASE') +'/plotting/stat_analysis') ) - assert(ntest_files == 32) + assert ntest_files == 32 for expected_filename in expected_filename_list: - assert(os.path.exists(expected_filename)) + assert os.path.exists(expected_filename) diff --git a/internal_tests/pytests/tc_gen/test_tc_gen_wrapper.py b/internal_tests/pytests/wrappers/tc_gen/test_tc_gen_wrapper.py similarity index 97% rename from internal_tests/pytests/tc_gen/test_tc_gen_wrapper.py rename to internal_tests/pytests/wrappers/tc_gen/test_tc_gen_wrapper.py index 575fd2bf6e..d424abbeae 100644 --- a/internal_tests/pytests/tc_gen/test_tc_gen_wrapper.py +++ b/internal_tests/pytests/wrappers/tc_gen/test_tc_gen_wrapper.py @@ -1,12 +1,12 @@ #!/usr/bin/env python3 -import os -import sys import pytest -import datetime + +import os from metplus.wrappers.tc_gen_wrapper import TCGenWrapper + @pytest.mark.parametrize( 'config_overrides, env_var_values', [ @@ -285,6 +285,7 @@ ] ) +@pytest.mark.wrapper_a def test_tc_gen(metplus_config, config_overrides, env_var_values): # expected number of 2016 files (including file_list line) expected_genesis_count = 7 @@ -382,11 +383,11 @@ def test_tc_gen(metplus_config, config_overrides, env_var_values): all_cmds = wrapper.run_all_times() print(f"ALL COMMANDS: {all_cmds}") - assert(len(all_cmds) == len(expected_cmds)) + assert len(all_cmds) == len(expected_cmds) for (cmd, env_vars), expected_cmd in zip(all_cmds, expected_cmds): # ensure commands are generated as expected - assert(cmd == expected_cmd) + assert cmd == expected_cmd # check that environment variables were set properly # including deprecated env vars (not in wrapper env var keys) @@ -396,27 +397,29 @@ def test_tc_gen(metplus_config, config_overrides, env_var_values): for env_var_key in env_var_keys: match = next((item for item in env_vars if item.startswith(env_var_key)), None) - assert(match is not None) + assert match is not None value = match.split('=', 1)[1] - assert(env_var_values.get(env_var_key, '') == value) + assert env_var_values.get(env_var_key, '') == value # verify file count of genesis, edeck, shape, and track file list files with open(genesis_path, 'r') as file_handle: lines = file_handle.read().splitlines() - assert(len(lines) == expected_genesis_count) + assert len(lines) == expected_genesis_count with open(edeck_path, 'r') as file_handle: lines = file_handle.read().splitlines() - assert(len(lines) == expected_edeck_count) + assert len(lines) == expected_edeck_count with open(shape_path, 'r') as file_handle: lines = file_handle.read().splitlines() - assert(len(lines) == expected_shape_count) + 
assert len(lines) == expected_shape_count with open(track_path, 'r') as file_handle: lines = file_handle.read().splitlines() - assert(len(lines) == expected_track_count) + assert len(lines) == expected_track_count + +@pytest.mark.wrapper_a def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' diff --git a/internal_tests/pytests/tc_pairs/tc_pairs_wrapper_test.conf b/internal_tests/pytests/wrappers/tc_pairs/tc_pairs_wrapper_test.conf similarity index 100% rename from internal_tests/pytests/tc_pairs/tc_pairs_wrapper_test.conf rename to internal_tests/pytests/wrappers/tc_pairs/tc_pairs_wrapper_test.conf diff --git a/internal_tests/pytests/tc_pairs/test_tc_pairs_wrapper.py b/internal_tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py similarity index 96% rename from internal_tests/pytests/tc_pairs/test_tc_pairs_wrapper.py rename to internal_tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py index a0845be227..265564a29b 100644 --- a/internal_tests/pytests/tc_pairs/test_tc_pairs_wrapper.py +++ b/internal_tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py @@ -1,8 +1,9 @@ #!/usr/bin/env python3 +import pytest + import os from datetime import datetime -import pytest from metplus.wrappers.tc_pairs_wrapper import TCPairsWrapper @@ -15,6 +16,7 @@ time_fmt = '%Y%m%d%H' run_times = ['2014121318'] + def set_minimum_config_settings(config, loop_by='INIT'): # set config variables to prevent command from running and bypass check # if input files actually exist @@ -41,6 +43,7 @@ def set_minimum_config_settings(config, loop_by='INIT'): # can set adeck or edeck variables config.set('config', 'TC_PAIRS_ADECK_TEMPLATE', adeck_template) + @pytest.mark.parametrize( 'config_overrides, isOK', [ ({}, True), @@ -80,6 +83,7 @@ def test_read_storm_info(metplus_config, config_overrides, isOK): ('2020100700_F000_261N_1101W_FOF', 'wildcard', 'wildcard'), ] ) +@pytest.mark.wrapper def test_parse_storm_id(metplus_config, storm_id, basin, cyclone): """! Check that storm ID is parsed properly to get basin and cyclone. Check that it returns wildcard expressions basin and cyclone cannot be @@ -107,6 +111,7 @@ def test_parse_storm_id(metplus_config, storm_id, basin, cyclone): assert actual_basin == expected_basin assert actual_cyclone == expected_cyclone + @pytest.mark.parametrize( 'basin,cyclone,expected_files,expected_wildcard', [ ('al', '0104', ['get_bdeck_balq2014123118.gfso.0104'], False), @@ -123,6 +128,7 @@ def test_parse_storm_id(metplus_config, storm_id, basin, cyclone): 'get_bdeck_bmlq2014123118.gfso.0105'], True), ] ) +@pytest.mark.wrapper def test_get_bdeck(metplus_config, basin, cyclone, expected_files, expected_wildcard): """! 
Checks that the correct list of empty test files are found and the @@ -150,11 +156,12 @@ def test_get_bdeck(metplus_config, basin, cyclone, expected_files, wrapper = TCPairsWrapper(config) actual_files, actual_wildcard = wrapper._get_bdeck(basin, cyclone, time_info) - assert(actual_wildcard == expected_wildcard) - assert(len(actual_files) == len(expected_files)) + assert actual_wildcard == expected_wildcard + assert len(actual_files) == len(expected_files) for actual_file, expected_file in zip(sorted(actual_files), sorted(expected_files)): - assert(os.path.basename(actual_file) == expected_file) + assert os.path.basename(actual_file) == expected_file + @pytest.mark.parametrize( 'template, filename,other_basin,other_cyclone', [ @@ -178,6 +185,7 @@ def test_get_bdeck(metplus_config, basin, cyclone, expected_files, '20141009bml.dat', 'ml', None), ] ) +@pytest.mark.wrapper def test_get_basin_cyclone_from_bdeck(metplus_config, template, filename, other_cyclone, other_basin): fake_dir = '/fake/dir' @@ -210,6 +218,7 @@ def test_get_basin_cyclone_from_bdeck(metplus_config, template, filename, assert actual_basin == expected_basin assert actual_cyclone == expected_cyclone + @pytest.mark.parametrize( 'config_overrides, storm_type, values_to_check', [ # 0: storm_id @@ -231,6 +240,7 @@ def test_get_basin_cyclone_from_bdeck(metplus_config, template, filename, 'cyclone', ['09', '10', '09', '10']), ] ) +@pytest.mark.wrapper def test_tc_pairs_storm_id_lists(metplus_config, config_overrides, storm_type, values_to_check): config = metplus_config() @@ -272,20 +282,21 @@ def test_tc_pairs_storm_id_lists(metplus_config, config_overrides, print(f"CMD{idx}: {cmd}") print(f"ENV{idx}: {env_list}") - assert(len(all_cmds) == len(values_to_check)) + assert len(all_cmds) == len(values_to_check) for (cmd, env_vars), value_to_check in zip(all_cmds, values_to_check): env_var_key = f'METPLUS_{storm_type.upper()}' match = next((item for item in env_vars if item.startswith(env_var_key)), None) - assert (match is not None) + assert match is not None print(f"Checking env var: {env_var_key}") actual_value = match.split('=', 1)[1] expected_value = f'{storm_type} = ["{value_to_check}"];' assert actual_value == expected_value + @pytest.mark.parametrize( 'config_overrides, env_var_values', [ # 0: no config overrides that set env vars @@ -370,6 +381,7 @@ def test_tc_pairs_storm_id_lists(metplus_config, config_overrides, ] ) +@pytest.mark.wrapper def test_tc_pairs_loop_order_processes(metplus_config, config_overrides, env_var_values): # run using init and valid time variables @@ -425,26 +437,27 @@ def test_tc_pairs_loop_order_processes(metplus_config, config_overrides, all_cmds = wrapper.run_all_times() print(f"ALL COMMANDS: {all_cmds}") - assert(len(all_cmds) == len(expected_cmds)) + assert len(all_cmds) == len(expected_cmds) for (cmd, env_vars), expected_cmd in zip(all_cmds, expected_cmds): # ensure commands are generated as expected - assert(cmd == expected_cmd) + assert cmd == expected_cmd # check that environment variables were set properly for env_var_key in wrapper.WRAPPER_ENV_VAR_KEYS: match = next((item for item in env_vars if item.startswith(env_var_key)), None) - assert(match is not None) + assert match is not None print(f'Checking env var: {env_var_key}') actual_value = match.split('=', 1)[1] - assert(env_var_values.get(env_var_key, '') == actual_value) + assert env_var_values.get(env_var_key, '') == actual_value if remove_beg: del env_var_values[f'METPLUS_{loop_by}_BEG'] if remove_end: del 
env_var_values[f'METPLUS_{loop_by}_END'] + @pytest.mark.parametrize( 'config_overrides, env_var_values', [ # 0: no config overrides that set env vars @@ -460,6 +473,7 @@ def test_tc_pairs_loop_order_processes(metplus_config, config_overrides, {'METPLUS_CYCLONE': 'cyclone = ["1005", "0104"];'}), ] ) +@pytest.mark.wrapper def test_tc_pairs_read_all_files(metplus_config, config_overrides, env_var_values): # run using init and valid time variables @@ -512,22 +526,24 @@ def test_tc_pairs_read_all_files(metplus_config, config_overrides, all_cmds = wrapper.run_all_times() print(f"ALL COMMANDS: {all_cmds}") - assert(len(all_cmds) == len(expected_cmds)) + assert len(all_cmds) == len(expected_cmds) for (cmd, env_vars), expected_cmd in zip(all_cmds, expected_cmds): # check that environment variables were set properly for env_var_key in wrapper.WRAPPER_ENV_VAR_KEYS: match = next((item for item in env_vars if item.startswith(env_var_key)), None) - assert(match is not None) + assert match is not None print(f'Checking env var: {env_var_key}') actual_value = match.split('=', 1)[1] - assert(env_var_values.get(env_var_key, '') == actual_value) + assert env_var_values.get(env_var_key, '') == actual_value # unset begin and end for next loop del env_var_values[f'METPLUS_{loop_by}_BEG'] del env_var_values[f'METPLUS_{loop_by}_END'] + +@pytest.mark.wrapper def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' diff --git a/internal_tests/pytests/tc_stat/tc_stat_conf.conf b/internal_tests/pytests/wrappers/tc_stat/tc_stat_conf.conf similarity index 100% rename from internal_tests/pytests/tc_stat/tc_stat_conf.conf rename to internal_tests/pytests/wrappers/tc_stat/tc_stat_conf.conf diff --git a/internal_tests/pytests/tc_stat/test_tc_stat_wrapper.py b/internal_tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py similarity index 98% rename from internal_tests/pytests/tc_stat/test_tc_stat_wrapper.py rename to internal_tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py index e84b90b09a..fe66cff3f8 100644 --- a/internal_tests/pytests/tc_stat/test_tc_stat_wrapper.py +++ b/internal_tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py @@ -1,21 +1,22 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 + +import pytest import os import sys -import pytest import datetime -import produtil - from metplus.wrappers.tc_stat_wrapper import TCStatWrapper from metplus.util import ti_calculate + def get_config(metplus_config): extra_configs = [] extra_configs.append(os.path.join(os.path.dirname(__file__), 'tc_stat_conf.conf')) return metplus_config(extra_configs) + def tc_stat_wrapper(metplus_config): """! 
Returns a default TCStatWrapper with /path/to entries in the metplus_system.conf and metplus_runtime.conf configuration @@ -27,6 +28,7 @@ def tc_stat_wrapper(metplus_config): config = get_config(metplus_config) return TCStatWrapper(config) + @pytest.mark.parametrize( 'overrides, c_dict', [ ({'TC_STAT_INIT_BEG': '20150301', @@ -106,7 +108,8 @@ def tc_stat_wrapper(metplus_config): 'INIT_STR_EXC_VAL': 'init_str_exc_val = ["HUWARN"];'}), ] - ) +) +@pytest.mark.wrapper def test_override_config_in_c_dict(metplus_config, overrides, c_dict): config = get_config(metplus_config) instance = 'tc_stat_overrides' @@ -119,6 +122,7 @@ def test_override_config_in_c_dict(metplus_config, overrides, c_dict): assert (wrapper.env_var_dict.get(f'METPLUS_{key}') == expected_value or wrapper.c_dict.get(key) == expected_value) + @pytest.mark.parametrize( 'jobs, init_dt, expected_output', [ # single fake job @@ -143,6 +147,7 @@ def test_override_config_in_c_dict(metplus_config, overrides, c_dict): ), ] ) +@pytest.mark.wrapper def test_handle_jobs(metplus_config, jobs, init_dt, expected_output): if init_dt: time_info = ti_calculate({'init': init_dt}) @@ -158,7 +163,7 @@ def test_handle_jobs(metplus_config, jobs, init_dt, expected_output): wrapper.c_dict['JOBS'].append(job.replace('', output_dir)) output = wrapper.handle_jobs(time_info) - assert(output == expected_output.replace('', output_dir)) + assert output == expected_output.replace('', output_dir) def cleanup_test_dirs(parent_dirs, output_dir): @@ -168,6 +173,7 @@ def cleanup_test_dirs(parent_dirs, output_dir): if os.path.exists(parent_dir_sub): os.removedirs(parent_dir_sub) + @pytest.mark.parametrize( 'jobs, init_dt, expected_output, parent_dirs', [ # single fake job, no parent dir @@ -216,6 +222,7 @@ def cleanup_test_dirs(parent_dirs, output_dir): ), ] ) +@pytest.mark.wrapper def test_handle_jobs_create_parent_dir(metplus_config, jobs, init_dt, expected_output, parent_dirs): # if init time is provided, calculate other time dict items @@ -254,6 +261,7 @@ def test_handle_jobs_create_parent_dir(metplus_config, jobs, init_dt, cleanup_test_dirs(parent_dirs, output_dir) +@pytest.mark.wrapper def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' diff --git a/internal_tests/pytests/user_script/test_user_script.py b/internal_tests/pytests/wrappers/user_script/test_user_script.py similarity index 99% rename from internal_tests/pytests/user_script/test_user_script.py rename to internal_tests/pytests/wrappers/user_script/test_user_script.py index de69d76340..b45fe88108 100644 --- a/internal_tests/pytests/user_script/test_user_script.py +++ b/internal_tests/pytests/wrappers/user_script/test_user_script.py @@ -1,17 +1,13 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 -import os -import sys -import re -import logging -from collections import namedtuple import pytest + +import re from datetime import datetime -import produtil from metplus.wrappers.user_script_wrapper import UserScriptWrapper -from metplus.util import time_util + def sub_clock_time(input_cmd, clock_time): """! Helper function to replace clock time from config in expected output @@ -54,6 +50,7 @@ def sub_clock_time(input_cmd, clock_time): return output_cmd + def set_run_type_info(config, run_type): """! 
Set time values for the init or valid time in the config object @@ -347,6 +344,7 @@ ['echo a'] * 12 + ['echo b'] * 12), ] ) +@pytest.mark.wrapper def test_run_user_script_all_times(metplus_config, input_configs, run_types, expected_cmds): config = metplus_config()
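With every test tagged, a single marker group can be exercised on its own. A self-contained sketch of running one group programmatically; the helper filename and the default group are illustrative assumptions:

    # run_pytest_group.py (hypothetical helper)
    import sys

    import pytest

    if __name__ == '__main__':
        # accept a marker expression, e.g. "util", "wrapper_a", or "util or wrapper"
        marker_expr = sys.argv[1] if len(sys.argv) > 1 else 'util'
        # pytest.main returns the usual exit status: 0 only if all selected tests pass
        sys.exit(pytest.main(['-vv', '-m', marker_expr]))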