Skip to content

Commit

Permalink
Migrate launch tests to new launch_testing features & API (#140)
Browse files · Browse the repository at this point in the history
* Update after launch_testing features becoming legacy.

Signed-off-by: Michel Hidalgo <[email protected]>

* Migrate rcutils tests to new launch_testing API.

Signed-off-by: Michel Hidalgo <[email protected]>

* Stop using injected attributes in launch tests.

Signed-off-by: Michel Hidalgo <[email protected]>
Loading branch information…
hidmic authored Apr 30, 2019
1 parent dcb2733 commit e52f2cf
Show file tree
Hide file tree
Showing 4 changed files with 71 additions and 54 deletions.
14 changes: 10 additions & 4 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -118,6 +118,8 @@ if(BUILD_TESTING)
find_package(ament_lint_auto REQUIRED)
ament_lint_auto_find_test_dependencies()

find_package(launch_testing_ament_cmake REQUIRED)

if(ament_cmake_cppcheck_FOUND)
ament_cppcheck(
TESTNAME "cppcheck_logging_macros"
Expand Down Expand Up @@ -150,15 +152,19 @@ if(BUILD_TESTING)

add_executable(test_logging_long_messages test/test_logging_long_messages.cpp)
target_link_libraries(test_logging_long_messages ${PROJECT_NAME})
ament_add_pytest_test(test_logging_long_messages
add_launch_test(
"test/test_logging_long_messages.py"
TARGET test_logging_long_messages
WORKING_DIRECTORY "$<TARGET_FILE_DIR:test_logging_long_messages>"
TIMEOUT 10)
TIMEOUT 10
)

ament_add_pytest_test(test_logging_output_format
add_launch_test(
"test/test_logging_output_format.py"
TARGET test_logging_output_format
WORKING_DIRECTORY "$<TARGET_FILE_DIR:test_logging_long_messages>"
TIMEOUT 10)
TIMEOUT 10
)

ament_add_gmock(test_logging_macros test/test_logging_macros.cpp)
target_link_libraries(test_logging_macros ${PROJECT_NAME})
Expand Down
1 change: 1 addition & 0 deletions package.xml
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
<test_depend>ament_lint_auto</test_depend>
<test_depend>launch</test_depend>
<test_depend>launch_testing</test_depend>
<test_depend>launch_testing_ament_cmake</test_depend>
<test_depend>osrf_testing_tools_cpp</test_depend>

<export>
Expand Down
41 changes: 22 additions & 19 deletions test/test_logging_long_messages.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,38 +13,41 @@
# limitations under the License.

import os
import unittest

from launch import LaunchDescription
from launch import LaunchService
from launch.actions import ExecuteProcess
from launch_testing import LaunchTestService
from launch_testing.output import create_output_test_from_file
from launch.actions import OpaqueFunction

import launch_testing
import launch_testing.asserts

def test_logging_long_messages():

def generate_test_description(ready_fn):
launch_description = LaunchDescription()
# Set the output format to a "verbose" format that is expected by the executable output
os.environ['RCUTILS_CONSOLE_OUTPUT_FORMAT'] = \
'[{severity}] [{name}]: {message} ({function_name}() at {file_name}:{line_number})'
executable = os.path.join(os.getcwd(), 'test_logging_long_messages')
if os.name == 'nt':
executable += '.exe'
ld = LaunchDescription()
launch_test = LaunchTestService()
action = launch_test.add_fixture_action(ld, ExecuteProcess(
cmd=[executable], name='test_logging_long_messages', output='screen'
process_name = 'test_logging_long_messages'
launch_description.add_action(ExecuteProcess(
cmd=[executable], name=process_name, output='screen'
))
output_file = os.path.join(
os.path.dirname(__file__), 'test_logging_long_messages'
)
launch_test.add_output_test(
ld, action, create_output_test_from_file(output_file)

launch_description.add_action(
OpaqueFunction(function=lambda context: ready_fn())
)
return launch_description, {'process_name': process_name}

launch_service = LaunchService()
launch_service.include_launch_description(ld)
return_code = launch_test.run(launch_service)
assert return_code == 0, 'Launch failed with exit code %r' % (return_code,)

class TestLoggingLongMessages(unittest.TestCase):

if __name__ == '__main__':
test_logging_long_messages()
def test_logging_output(self, proc_output, process_name):
"""Test executable output against expectation."""
proc_output.assertWaitFor(
expected_output=launch_testing.tools.expected_output_from_file(
path=os.path.join(os.path.dirname(__file__), process_name)
), process=process_name, timeout=10
)
69 changes: 38 additions & 31 deletions test/test_logging_output_format.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,17 +13,20 @@
# limitations under the License.

import os
import unittest

from launch import LaunchDescription
from launch import LaunchService
from launch.actions import ExecuteProcess
from launch_testing import LaunchTestService
from launch_testing.output import create_output_test_from_file
from launch.actions import OpaqueFunction

import launch_testing
import launch_testing.asserts

def test_logging_output_format():
ld = LaunchDescription()
launch_test = LaunchTestService()

def generate_test_description(ready_fn):
processes_to_test = []

launch_description = LaunchDescription()
# Re-use the test_logging_long_messages test binary and modify the output format from an
# environment variable.
executable = os.path.join(os.getcwd(), 'test_logging_long_messages')
Expand All @@ -35,55 +38,59 @@ def test_logging_output_format():
env_long['RCUTILS_CONSOLE_OUTPUT_FORMAT'] = \
'[{{name}}].({severity}) output: {file_name}:{line_number} {message}, again: {message} ({function_name}()){' # noqa
name = 'test_logging_output_format_long'
action = launch_test.add_fixture_action(ld, ExecuteProcess(
launch_description.add_action(ExecuteProcess(
cmd=[executable], env=env_long, name=name, output='screen'
))
output_file = os.path.join(os.path.dirname(__file__), name)
launch_test.add_output_test(
ld, action, create_output_test_from_file(output_file)
)
processes_to_test.append(name)

env_edge_cases = dict(os.environ)
# This custom output is to check different edge cases of the output format string parsing.
env_edge_cases['RCUTILS_CONSOLE_OUTPUT_FORMAT'] = '{}}].({unknown_token}) {{{{'
name = 'test_logging_output_format_edge_cases'
action = launch_test.add_fixture_action(ld, ExecuteProcess(
launch_description.add_action(ExecuteProcess(
cmd=[executable], env=env_edge_cases, name=name, output='screen'
))
output_file = os.path.join(os.path.dirname(__file__), name)
launch_test.add_output_test(
ld, action, create_output_test_from_file(output_file)
)
processes_to_test.append(name)

env_no_tokens = dict(os.environ)
# This custom output is to check that there are no issues when no tokens are used.
env_no_tokens['RCUTILS_CONSOLE_OUTPUT_FORMAT'] = 'no_tokens'
name = 'test_logging_output_format_no_tokens'
action = launch_test.add_fixture_action(ld, ExecuteProcess(
launch_description.add_action(ExecuteProcess(
cmd=[executable], env=env_no_tokens, name=name, output='screen'
))
output_file = os.path.join(os.path.dirname(__file__), name)
launch_test.add_output_test(
ld, action, create_output_test_from_file(output_file)
)
processes_to_test.append(name)

env_time_tokens = dict(os.environ)
# This custom output is to check that time stamps work correctly
env_time_tokens['RCUTILS_CONSOLE_OUTPUT_FORMAT'] = "'{time}' '{time_as_nanoseconds}'"
name = 'test_logging_output_timestamps'
action = launch_test.add_fixture_action(ld, ExecuteProcess(
launch_description.add_action(ExecuteProcess(
cmd=[executable], env=env_time_tokens, name=name, output='screen'
))
output_file = os.path.join(os.path.dirname(__file__), name)
launch_test.add_output_test(
ld, action, create_output_test_from_file(output_file)
processes_to_test.append(name)

launch_description.add_action(
OpaqueFunction(function=lambda context: ready_fn())
)

launch_service = LaunchService()
launch_service.include_launch_description(ld)
return_code = launch_test.run(launch_service)
assert return_code == 0, 'Launch failed with exit code %r' % (return_code,)
return launch_description, {'processes_to_test': processes_to_test}


@launch_testing.post_shutdown_test()
class TestLoggingOutputFormatAfterShutdown(unittest.TestCase):

def test_logging_output(self, proc_output, processes_to_test):
"""Test all executables output against expectations."""
for process_name in processes_to_test:
launch_testing.asserts.assertInStdout(
proc_output,
expected_output=launch_testing.tools.expected_output_from_file(
path=os.path.join(os.path.dirname(__file__), process_name)
),
process=process_name
)

if __name__ == '__main__':
test_logging_output_format()
def test_processes_exit_codes(self, proc_info):
"""Test that all executables finished cleanly."""
launch_testing.asserts.assertExitCodes(proc_info)

0 comments on commit e52f2cf

Please sign in to comment.