diff --git a/pythonFiles/tests/pytestadapter/.data/error_raise_exception.py b/pythonFiles/tests/pytestadapter/.data/error_raise_exception.py
new file mode 100644
index 000000000000..2506089abe07
--- /dev/null
+++ b/pythonFiles/tests/pytestadapter/.data/error_raise_exception.py
@@ -0,0 +1,14 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+
+import pytest
+
+
+@pytest.fixture
+def raise_fixture():
+    raise Exception("Dummy exception")
+
+
+class TestSomething:
+    def test_a(self, raise_fixture):
+        assert True
diff --git a/pythonFiles/tests/pytestadapter/.data/parametrize_tests.py b/pythonFiles/tests/pytestadapter/.data/parametrize_tests.py
index 6911c9aec7f0..a39b7c26de9f 100644
--- a/pythonFiles/tests/pytestadapter/.data/parametrize_tests.py
+++ b/pythonFiles/tests/pytestadapter/.data/parametrize_tests.py
@@ -1,3 +1,6 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+
 import pytest
 
 
diff --git a/pythonFiles/tests/pytestadapter/.data/skip_tests.py b/pythonFiles/tests/pytestadapter/.data/skip_tests.py
new file mode 100644
index 000000000000..113e3506932a
--- /dev/null
+++ b/pythonFiles/tests/pytestadapter/.data/skip_tests.py
@@ -0,0 +1,30 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+
+import pytest
+
+# Testing pytest with skipped tests. The first test passes; the following three are skipped.
+
+
+def test_something():  # test_marker--test_something
+    # This test passes successfully.
+    assert 1 + 1 == 2
+
+
+def test_another_thing():  # test_marker--test_another_thing
+    # Skip this test with a reason.
+    pytest.skip("Skipping this test for now")
+
+
+@pytest.mark.skip(
+    reason="Skipping this test as it requires additional setup"  # test_marker--test_complex_thing
+)
+def test_decorator_thing():
+    # Skip this test as well, with a reason. This one uses a decorator.
+    assert True
+
+
+@pytest.mark.skipif(1 < 5, reason="is always true")  # test_marker--test_complex_thing_2
+def test_decorator_thing_2():
+    # Skip this test as well, with a reason. This one uses a decorator with a condition.
+    assert True
diff --git a/pythonFiles/tests/pytestadapter/expected_discovery_test_output.py b/pythonFiles/tests/pytestadapter/expected_discovery_test_output.py
index b05ed5e9f00c..fb8234350fb4 100644
--- a/pythonFiles/tests/pytestadapter/expected_discovery_test_output.py
+++ b/pythonFiles/tests/pytestadapter/expected_discovery_test_output.py
@@ -489,7 +489,10 @@
         {
             "name": "[1]",
             "path": parameterize_tests_path,
-            "lineno": "15",
+            "lineno": find_test_line_number(
+                "test_under_ten[1]",
+                parameterize_tests_path,
+            ),
             "type_": "test",
             "id_": "parametrize_tests.py::test_under_ten[1]",
             "runID": "parametrize_tests.py::test_under_ten[1]",
@@ -497,7 +500,10 @@
         {
             "name": "[2]",
             "path": parameterize_tests_path,
-            "lineno": "15",
+            "lineno": find_test_line_number(
+                "test_under_ten[2]",
+                parameterize_tests_path,
+            ),
             "type_": "test",
             "id_": "parametrize_tests.py::test_under_ten[2]",
             "runID": "parametrize_tests.py::test_under_ten[2]",
diff --git a/pythonFiles/tests/pytestadapter/expected_execution_test_output.py b/pythonFiles/tests/pytestadapter/expected_execution_test_output.py
index abe27ffc79ce..3d24f036fe2a 100644
--- a/pythonFiles/tests/pytestadapter/expected_execution_test_output.py
+++ b/pythonFiles/tests/pytestadapter/expected_execution_test_output.py
@@ -150,6 +150,57 @@
     },
 }
 
+# This is the expected output for the error_raise_exception.py file.
+# └── error_raise_exception.py
+#    ├── TestSomething
+#    │   └── test_a: failure
+error_raised_exception_execution_expected_output = {
+    "error_raise_exception.py::TestSomething::test_a": {
+        "test": "error_raise_exception.py::TestSomething::test_a",
+        "outcome": "failure",
+        "message": "ERROR MESSAGE",
+        "traceback": "TRACEBACK",
+        "subtest": None,
+    }
+}
+
+# This is the expected output for the skip_tests.py file.
+# └── test_something: success
+# └── test_another_thing: skipped
+# └── test_decorator_thing: skipped
+# └── test_decorator_thing_2: skipped
+skip_tests_execution_expected_output = {
+    "skip_tests.py::test_something": {
+        "test": "skip_tests.py::test_something",
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+    "skip_tests.py::test_another_thing": {
+        "test": "skip_tests.py::test_another_thing",
+        "outcome": "skipped",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+    "skip_tests.py::test_decorator_thing": {
+        "test": "skip_tests.py::test_decorator_thing",
+        "outcome": "skipped",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+    "skip_tests.py::test_decorator_thing_2": {
+        "test": "skip_tests.py::test_decorator_thing_2",
+        "outcome": "skipped",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+}
+
+
 # This is the expected output for the dual_level_nested_folder.py tests
 # └── dual_level_nested_folder
 #    └── test_top_folder.py
diff --git a/pythonFiles/tests/pytestadapter/test_execution.py b/pythonFiles/tests/pytestadapter/test_execution.py
index 400ef9f883bc..f147a0462f38 100644
--- a/pythonFiles/tests/pytestadapter/test_execution.py
+++ b/pythonFiles/tests/pytestadapter/test_execution.py
@@ -56,6 +56,19 @@ def test_bad_id_error_execution():
 @pytest.mark.parametrize(
     "test_ids, expected_const",
     [
+        (
+            [
+                "skip_tests.py::test_something",
+                "skip_tests.py::test_another_thing",
+                "skip_tests.py::test_decorator_thing",
+                "skip_tests.py::test_decorator_thing_2",
+            ],
+            expected_execution_test_output.skip_tests_execution_expected_output,
+        ),
+        (
+            ["error_raise_exception.py::TestSomething::test_a"],
+            expected_execution_test_output.error_raised_exception_execution_expected_output,
+        ),
         (
             [
                 "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers",
@@ -161,4 +174,6 @@ def test_pytest_execution(test_ids, expected_const):
     for key in actual_result_dict:
         if actual_result_dict[key]["outcome"] == "failure":
             actual_result_dict[key]["message"] = "ERROR MESSAGE"
+        if actual_result_dict[key]["traceback"] is not None:
+            actual_result_dict[key]["traceback"] = "TRACEBACK"
     assert actual_result_dict == expected_const
diff --git a/pythonFiles/vscode_pytest/__init__.py b/pythonFiles/vscode_pytest/__init__.py
index d5d7d0e6a9f2..b14a79aef7fd 100644
--- a/pythonFiles/vscode_pytest/__init__.py
+++ b/pythonFiles/vscode_pytest/__init__.py
@@ -69,14 +69,37 @@ def pytest_exception_interact(node, call, report):
     """
     # call.excinfo is the captured exception of the call, if it raised as type ExceptionInfo.
    # call.excinfo.exconly() returns the exception as a string.
-    if call.excinfo and call.excinfo.typename != "AssertionError":
-        ERRORS.append(
-            call.excinfo.exconly() + "\n Check Python Test Logs for more details."
-        )
+    # See if it occurred during discovery or execution.
+    # If discovery, add the error to the error logs.
+    if type(report) == pytest.CollectReport:
+        if call.excinfo and call.excinfo.typename != "AssertionError":
+            ERRORS.append(
+                call.excinfo.exconly() + "\n Check Python Test Logs for more details."
+            )
+        else:
+            ERRORS.append(
+                report.longreprtext + "\n Check Python Test Logs for more details."
+            )
     else:
-        ERRORS.append(
-            report.longreprtext + "\n Check Python Test Logs for more details."
-        )
+        # If execution, send data signaling that the given node failed.
+        report_value = "failure"
+        node_id = str(node.nodeid)
+        if node_id not in collected_tests_so_far:
+            collected_tests_so_far.append(node_id)
+            item_result = create_test_outcome(
+                node_id,
+                report_value,
+                "Test failed with exception",
+                report.longreprtext,
+            )
+            collected_test = testRunResultDict()
+            collected_test[node_id] = item_result
+            cwd = pathlib.Path.cwd()
+            execution_post(
+                os.fsdecode(cwd),
+                "success",
+                collected_test if collected_test else None,
+            )
 
 
 def pytest_keyboard_interrupt(excinfo):
@@ -183,6 +206,35 @@ def pytest_report_teststatus(report, config):
     }
 
 
+def pytest_runtest_protocol(item, nextitem):
+    if item.own_markers:
+        for marker in item.own_markers:
+            # If the test is marked with skip, it will not hit the pytest_report_teststatus hook,
+            # so we need to handle it as skipped here.
+            skip_condition = False
+            if marker.name == "skipif":
+                skip_condition = any(marker.args)
+            if marker.name == "skip" or skip_condition:
+                node_id = str(item.nodeid)
+                report_value = "skipped"
+                cwd = pathlib.Path.cwd()
+                if node_id not in collected_tests_so_far:
+                    collected_tests_so_far.append(node_id)
+                    item_result = create_test_outcome(
+                        node_id,
+                        report_value,
+                        None,
+                        None,
+                    )
+                    collected_test = testRunResultDict()
+                    collected_test[node_id] = item_result
+                    execution_post(
+                        os.fsdecode(cwd),
+                        "success",
+                        collected_test if collected_test else None,
+                    )
+
+
 def pytest_sessionfinish(session, exitstatus):
     """A pytest hook that is called after pytest has fulled finished.
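
A minimal sketch of how the skip detection added to pytest_runtest_protocol above evaluates the markers defined in skip_tests.py. The helper name is_skipped_by_marker is illustrative only and is not part of this change; the Mark objects come from pytest's public API (pytest.mark.*, MarkDecorator.mark), which is what populates item.own_markers.

import pytest


def is_skipped_by_marker(own_markers):
    # Mirrors the hook's logic: a test counts as skipped if it carries a plain
    # `skip` marker, or a `skipif` marker whose positional condition arguments
    # are truthy.
    for marker in own_markers:
        skip_condition = False
        if marker.name == "skipif":
            skip_condition = any(marker.args)
        if marker.name == "skip" or skip_condition:
            return True
    return False


# MarkDecorator.mark exposes the Mark object that pytest stores on item.own_markers.
skip_mark = pytest.mark.skip(reason="requires additional setup").mark
skipif_true_mark = pytest.mark.skipif(1 < 5, reason="is always true").mark
skipif_false_mark = pytest.mark.skipif(1 > 5, reason="never true").mark

assert is_skipped_by_marker([skip_mark])
assert is_skipped_by_marker([skipif_true_mark])
assert not is_skipped_by_marker([skipif_false_mark])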
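
The expected_discovery_test_output.py change replaces hard-coded "lineno" values with calls to find_test_line_number, a helper defined elsewhere in the pytestadapter test suite and not shown in this diff. One plausible implementation is sketched below, assuming the .data files carry "# test_marker--<test name>" comments like the ones added in skip_tests.py; the actual helper may differ.

def find_test_line_number(test_name, test_file_path):
    # Illustrative sketch only. Parametrized ids such as "test_under_ten[1]"
    # are reduced to the bare function name before searching for the marker.
    marker = "test_marker--" + test_name.split("[")[0]
    with open(test_file_path, encoding="utf-8") as f:
        for line_number, line in enumerate(f, start=1):
            if marker in line:
                # The expected-output dicts store line numbers as strings.
                return str(line_number)
    raise ValueError(f"Marker {marker!r} not found in {test_file_path}.")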