From 8d8c9b9659008bd9f77a857f97195408e9a34ead Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Sat, 26 Dec 2020 14:32:01 +0100 Subject: [PATCH 001/113] :alembic: move CI targets to CMake --- CMakeLists.txt | 5 + cmake/ci.cmake | 329 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 334 insertions(+) create mode 100644 cmake/ci.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 44ede3e799..58573b0af7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -34,6 +34,11 @@ option(JSON_BuildTests "Build the unit tests when BUILD_TESTING is enabled." ${M option(JSON_Install "Install CMake targets during install step." ${MAIN_PROJECT}) option(JSON_MultipleHeaders "Use non-amalgamated version of the library." OFF) option(JSON_ImplicitConversions "Enable implicit conversions." ON) +option(JSON_CI "Enable CI build targets." OFF) + +if (JSON_CI) + include(cmake/ci.cmake) +endif () ## ## CONFIGURATION diff --git a/cmake/ci.cmake b/cmake/ci.cmake new file mode 100644 index 0000000000..6c3c40d61a --- /dev/null +++ b/cmake/ci.cmake @@ -0,0 +1,329 @@ +find_program(CLANG_TIDY_TOOL NAMES clang-tidy REQUIRED) +find_program(CLANG_TOOL NAMES clang++-HEAD clang++-11 clang++ REQUIRED) +find_program(CPPCHECK_TOOL NAMES cppcheck REQUIRED) +find_program(GCC_TOOL NAMES g++-HEAD g++-11 g++ REQUIRED) +find_program(PLOG_CONVERTER_TOOL NAMES plog-converter REQUIRED) +find_program(PVS_STUDIO_ANALYZER_TOOL NAMES pvs-studio-analyzer REQUIRED) +find_program(SCAN_BUILD_TOOL NAMES scan-build REQUIRED) + +set(CLANG_CXXFLAGS "-std=c++11 \ + -Werror \ + -Weverything \ + -Wno-c++98-compat \ + -Wno-c++98-compat-pedantic \ + -Wno-c++2a-compat \ + -Wno-deprecated-declarations \ + -Wno-documentation-unknown-command \ + -Wno-exit-time-destructors \ + -Wno-float-equal \ + -Wno-missing-prototypes \ + -Wno-padded \ + -Wno-range-loop-analysis \ + -Wno-switch-enum -Wno-covered-switch-default \ + -Wno-weak-vtables \ +") + +set(GCC_CXXFLAGS "-std=c++11 \ + -pedantic \ + -Werror \ + --all-warnings \ + --extra-warnings \ + -W \ + -Wno-abi-tag \ + -Waddress \ + -Waddress-of-packed-member \ + -Wno-aggregate-return \ + -Waggressive-loop-optimizations \ + -Waligned-new=all \ + -Wall \ + -Walloc-zero \ + -Walloca \ + -Wanalyzer-double-fclose \ + -Wanalyzer-double-free \ + -Wanalyzer-exposure-through-output-file \ + -Wanalyzer-file-leak \ + -Wanalyzer-free-of-non-heap \ + -Wanalyzer-malloc-leak \ + -Wanalyzer-null-argument \ + -Wanalyzer-null-dereference \ + -Wanalyzer-possible-null-argument \ + -Wanalyzer-possible-null-dereference \ + -Wanalyzer-stale-setjmp-buffer \ + -Wanalyzer-tainted-array-index \ + -Wanalyzer-too-complex \ + -Wanalyzer-unsafe-call-within-signal-handler \ + -Wanalyzer-use-after-free \ + -Wanalyzer-use-of-pointer-in-stale-stack-frame \ + -Warith-conversion \ + -Warray-bounds \ + -Warray-bounds=2 \ + -Wattribute-alias=2 \ + -Wattribute-warning \ + -Wattributes \ + -Wbool-compare \ + -Wbool-operation \ + -Wbuiltin-declaration-mismatch \ + -Wbuiltin-macro-redefined \ + -Wc++0x-compat \ + -Wc++11-compat \ + -Wc++14-compat \ + -Wc++17-compat \ + -Wc++1z-compat \ + -Wc++20-compat \ + -Wc++2a-compat \ + -Wcannot-profile \ + -Wcast-align \ + -Wcast-align=strict \ + -Wcast-function-type \ + -Wcast-qual \ + -Wcatch-value=3 \ + -Wchar-subscripts \ + -Wclass-conversion \ + -Wclass-memaccess \ + -Wclobbered \ + -Wcomma-subscript \ + -Wcomment \ + -Wcomments \ + -Wconditionally-supported \ + -Wconversion \ + -Wconversion-null \ + -Wcoverage-mismatch \ + -Wcpp \ + -Wctor-dtor-privacy \ + -Wdangling-else \ + -Wdate-time \ 
+ -Wdelete-incomplete \ + -Wdelete-non-virtual-dtor \ + -Wdeprecated \ + -Wdeprecated-copy \ + -Wdeprecated-copy-dtor \ + -Wdeprecated-declarations \ + -Wdisabled-optimization \ + -Wdiv-by-zero \ + -Wdouble-promotion \ + -Wduplicated-branches \ + -Wduplicated-cond \ + -Weffc++ \ + -Wempty-body \ + -Wendif-labels \ + -Wenum-compare \ + -Wexpansion-to-defined \ + -Wextra \ + -Wextra-semi \ + -Wfloat-conversion \ + -Wfloat-equal \ + -Wformat -Wformat-contains-nul \ + -Wformat -Wformat-extra-args \ + -Wformat -Wformat-nonliteral \ + -Wformat -Wformat-security \ + -Wformat -Wformat-y2k \ + -Wformat -Wformat-zero-length \ + -Wformat-diag \ + -Wformat-overflow=2 \ + -Wformat-signedness \ + -Wformat-truncation=2 \ + -Wformat=2 \ + -Wframe-address \ + -Wfree-nonheap-object \ + -Whsa \ + -Wif-not-aligned \ + -Wignored-attributes \ + -Wignored-qualifiers \ + -Wimplicit-fallthrough=5 \ + -Winaccessible-base \ + -Winherited-variadic-ctor \ + -Winit-list-lifetime \ + -Winit-self \ + -Winline \ + -Wint-in-bool-context \ + -Wint-to-pointer-cast \ + -Winvalid-memory-model \ + -Winvalid-offsetof \ + -Winvalid-pch \ + -Wliteral-suffix \ + -Wlogical-not-parentheses \ + -Wlogical-op \ + -Wno-long-long \ + -Wlto-type-mismatch \ + -Wmain \ + -Wmaybe-uninitialized \ + -Wmemset-elt-size \ + -Wmemset-transposed-args \ + -Wmisleading-indentation \ + -Wmismatched-tags \ + -Wmissing-attributes \ + -Wmissing-braces \ + -Wno-missing-declarations \ + -Wmissing-field-initializers \ + -Wmissing-include-dirs \ + -Wmissing-profile \ + -Wmultichar \ + -Wmultiple-inheritance \ + -Wmultistatement-macros \ + -Wno-namespaces \ + -Wnarrowing \ + -Wno-noexcept \ + -Wnoexcept-type \ + -Wnon-template-friend \ + -Wnon-virtual-dtor \ + -Wnonnull \ + -Wnonnull-compare \ + -Wnonportable-cfstrings \ + -Wnormalized=nfkc \ + -Wnull-dereference \ + -Wodr \ + -Wold-style-cast \ + -Wopenmp-simd \ + -Woverflow \ + -Woverlength-strings \ + -Woverloaded-virtual \ + -Wpacked \ + -Wpacked-bitfield-compat \ + -Wpacked-not-aligned \ + -Wno-padded \ + -Wparentheses \ + -Wpedantic \ + -Wpessimizing-move \ + -Wplacement-new=2 \ + -Wpmf-conversions \ + -Wpointer-arith \ + -Wpointer-compare \ + -Wpragmas \ + -Wprio-ctor-dtor \ + -Wpsabi \ + -Wredundant-decls \ + -Wredundant-move \ + -Wredundant-tags \ + -Wregister \ + -Wreorder \ + -Wrestrict \ + -Wreturn-local-addr \ + -Wreturn-type \ + -Wscalar-storage-order \ + -Wsequence-point \ + -Wshadow=compatible-local \ + -Wshadow=global \ + -Wshadow=local \ + -Wshift-count-negative \ + -Wshift-count-overflow \ + -Wshift-negative-value \ + -Wshift-overflow=2 \ + -Wsign-compare \ + -Wsign-conversion \ + -Wsign-promo \ + -Wsized-deallocation \ + -Wsizeof-array-argument \ + -Wsizeof-pointer-div \ + -Wsizeof-pointer-memaccess \ + -Wstack-protector \ + -Wstrict-aliasing \ + -Wstrict-aliasing=3 \ + -Wstrict-null-sentinel \ + -Wstrict-overflow \ + -Wstrict-overflow=5 \ + -Wstring-compare \ + -Wstringop-overflow \ + -Wstringop-overflow=4 \ + -Wstringop-truncation \ + -Wsubobject-linkage \ + -Wsuggest-attribute=cold \ + -Wsuggest-attribute=const \ + -Wsuggest-attribute=format \ + -Wsuggest-attribute=malloc \ + -Wsuggest-attribute=noreturn \ + -Wsuggest-attribute=pure \ + -Wsuggest-final-methods \ + -Wsuggest-final-types \ + -Wsuggest-override \ + -Wswitch \ + -Wswitch-bool \ + -Wswitch-default \ + -Wno-switch-enum \ + -Wswitch-outside-range \ + -Wswitch-unreachable \ + -Wsync-nand \ + -Wsynth \ + -Wno-system-headers \ + -Wtautological-compare \ + -Wno-templates \ + -Wterminate \ + -Wtrampolines \ + -Wtrigraphs \ + 
-Wtype-limits \ + -Wundef \ + -Wuninitialized \ + -Wunknown-pragmas \ + -Wunreachable-code \ + -Wunsafe-loop-optimizations \ + -Wunused \ + -Wunused-but-set-parameter \ + -Wunused-but-set-variable \ + -Wunused-const-variable=2 \ + -Wunused-function \ + -Wunused-label \ + -Wno-unused-local-typedefs \ + -Wunused-macros \ + -Wunused-parameter \ + -Wunused-result \ + -Wunused-value \ + -Wunused-variable \ + -Wuseless-cast \ + -Wvarargs \ + -Wvariadic-macros \ + -Wvector-operation-performance \ + -Wvirtual-inheritance \ + -Wvirtual-move-assign \ + -Wvla \ + -Wvolatile \ + -Wvolatile-register-var \ + -Wwrite-strings \ + -Wzero-as-null-pointer-constant \ + -Wzero-length-bounds \ +") + +add_custom_target(ci_test_gcc + COMMAND CXX=${GCC_TOOL} CXXFLAGS=${GCC_CXXFLAGS} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_g++ -DJSON_BuildTests=ON -GNinja + COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_g++ + COMMAND cd ${PROJECT_BINARY_DIR}/build_g++/test && ${CMAKE_CTEST_COMMAND} -j10 + COMMENT "Compile and test with GCC" +) + +add_custom_target(ci_test_clang + COMMAND CXX=${CLANG_TOOL} CXXFLAGS=${CLANG_CXXFLAGS} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang -DJSON_BuildTests=ON -GNinja + COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang + COMMAND cd ${PROJECT_BINARY_DIR}/build_clang/test && ${CMAKE_CTEST_COMMAND} -j10 + COMMENT "Compile and test with Clang" +) + +set(CLANG_ANALYZER_CHECKS "fuchsia.HandleChecker,nullability.NullableDereferenced,nullability.NullablePassedToNonnull,nullability.NullableReturnedFromNonnull,optin.cplusplus.UninitializedObject,optin.cplusplus.VirtualCall,optin.mpi.MPI-Checker,optin.osx.OSObjectCStyleCast,optin.osx.cocoa.localizability.EmptyLocalizationContextChecker,optin.osx.cocoa.localizability.NonLocalizedStringChecker,optin.performance.GCDAntipattern,optin.performance.Padding,optin.portability.UnixAPI,security.FloatLoopCounter,security.insecureAPI.DeprecatedOrUnsafeBufferHandling,security.insecureAPI.bcmp,security.insecureAPI.bcopy,security.insecureAPI.bzero,security.insecureAPI.rand,security.insecureAPI.strcpy,valist.CopyToSelf,valist.Uninitialized,valist.Unterminated,webkit.NoUncountedMemberChecker,webkit.RefCntblBaseVirtualDtor,core.CallAndMessage,core.DivideZero,core.NonNullParamChecker,core.NullDereference,core.StackAddressEscape,core.UndefinedBinaryOperatorResult,core.VLASize,core.uninitialized.ArraySubscript,core.uninitialized.Assign,core.uninitialized.Branch,core.uninitialized.CapturedBlockVariable,core.uninitialized.UndefReturn,cplusplus.InnerPointer,cplusplus.Move,cplusplus.NewDelete,cplusplus.NewDeleteLeaks,cplusplus.PlacementNew,cplusplus.PureVirtualCall,deadcode.DeadStores,nullability.NullPassedToNonnull,nullability.NullReturnedFromNonnull,osx.API,osx.MIG,osx.NumberObjectConversion,osx.OSObjectRetainCount,osx.ObjCProperty,osx.SecKeychainAPI,osx.cocoa.AtSync,osx.cocoa.AutoreleaseWrite,osx.cocoa.ClassRelease,osx.cocoa.Dealloc,osx.cocoa.IncompatibleMethodTypes,osx.cocoa.Loops,osx.cocoa.MissingSuperCall,osx.cocoa.NSAutoreleasePool,osx.cocoa.NSError,osx.cocoa.NilArg,osx.cocoa.NonNilReturnValue,osx.cocoa.ObjCGenerics,osx.cocoa.RetainCount,osx.cocoa.RunLoopAutoreleaseLeak,osx.cocoa.SelfInit,osx.cocoa.SuperDealloc,osx.cocoa.UnusedIvars,osx.cocoa.VariadicMethodTypes,osx.coreFoundation.CFError,osx.coreFoundation.CFNumber,osx.coreFoundation.CFRetainRelease,osx.coreFoundation.containers.OutOfBounds,osx.coreFoundation.containers.PointerSizedV
alues,security.insecureAPI.UncheckedReturn,security.insecureAPI.decodeValueOfObjCType,security.insecureAPI.getpw,security.insecureAPI.gets,security.insecureAPI.mkstemp,security.insecureAPI.mktemp,security.insecureAPI.vfork,unix.API,unix.Malloc,unix.MallocSizeof,unix.MismatchedDeallocator,unix.Vfork,unix.cstring.BadSizeArg,unix.cstring.NullArg") + +add_custom_target(ci_test_clang_analyze + COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_analyze -DJSON_BuildTests=ON -GNinja + COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_analyze && ${SCAN_BUILD_TOOL} -enable-checker ${CLANG_ANALYZER_CHECKS} --use-c++=${CLANG_TOOL} -analyze-headers -o ${PROJECT_BINARY_DIR}/report ninja + COMMENT "Compile and test with Clang Analyzer" +) + +add_custom_target(ci_cppcheck + COMMAND ${CPPCHECK_TOOL} --enable=warning --inline-suppr --inconclusive --force --std=c++11 ${PROJECT_SOURCE_DIR}/single_include/nlohmann/json.hpp --error-exitcode=1 + COMMENT "Check code with Cppcheck" +) + +file(GLOB_RECURSE SRC_FILES ${PROJECT_SOURCE_DIR}/include/nlohmann/*.hpp) + +add_custom_target(ci_clang_tidy + COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_tidy -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON -GNinja -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_CXX_CLANG_TIDY=clang-tidy + COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang_tidy + COMMENT "Check code with Clang-Tidy" +) + +add_custom_target(ci_pvs_studio + COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_pvs_studio -DJSON_BuildTests=ON -DCMAKE_EXPORT_COMPILE_COMMANDS=ON + COMMAND cd ${PROJECT_BINARY_DIR}/build_pvs_studio && ${PVS_STUDIO_ANALYZER_TOOL} analyze -j 10 + COMMAND cd ${PROJECT_BINARY_DIR}/build_pvs_studio && ${PLOG_CONVERTER_TOOL} -a'GA:1,2;64:1;CS' -t fullhtml PVS-Studio.log -o pvs + COMMENT "Check code with PVS Studio" +) + +add_custom_target(ci_clean + COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_g++ ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio + COMMENT "Clean generated directories" +) From 5276ab9c447826cae1e22c65649ab30ffdd2d334 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Sat, 26 Dec 2020 15:09:50 +0100 Subject: [PATCH 002/113] :recycle: add target for cpplint --- cmake/ci.cmake | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 6c3c40d61a..d62221860b 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -310,6 +310,11 @@ add_custom_target(ci_cppcheck file(GLOB_RECURSE SRC_FILES ${PROJECT_SOURCE_DIR}/include/nlohmann/*.hpp) +add_custom_target(ci_cpplint + COMMAND ${CMAKE_SOURCE_DIR}/third_party/cpplint/cpplint.py --filter=-whitespace,-legal,-runtime/references,-runtime/explicit --quiet --recursive ${SRC_FILES} + COMMENT "Check code with cpplint" +) + add_custom_target(ci_clang_tidy COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_tidy -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON -GNinja -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_CXX_CLANG_TIDY=clang-tidy COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang_tidy From d9941bdaee2ef9bbf0a42206ecd4348d8c0fc734 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Sat, 26 Dec 2020 17:55:24 +0100 Subject: [PATCH 003/113] 
:recycle: add target for self-contained binaries --- cmake/ci.cmake | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index d62221860b..65ad0f5514 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -295,6 +295,15 @@ add_custom_target(ci_test_clang COMMENT "Compile and test with Clang" ) +set(CLANG_CXX_FLAGS_SANITIZER "-g -O0 -fsanitize=address -fsanitize=undefined -fsanitize=integer -fsanitize=nullability -fno-omit-frame-pointer -fno-sanitize-recover=all -fsanitize-recover=unsigned-integer-overflow") + +add_custom_target(ci_test_clang_sanitizer + COMMAND CXX=${CLANG_TOOL} CXXFLAGS=${CLANG_CXX_FLAGS_SANITIZER} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_sanitizer -DJSON_BuildTests=ON -GNinja + COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang_sanitizer + COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_sanitizer/test && ${CMAKE_CTEST_COMMAND} -j10 + COMMENT "Compile and test with Clang" +) + set(CLANG_ANALYZER_CHECKS "fuchsia.HandleChecker,nullability.NullableDereferenced,nullability.NullablePassedToNonnull,nullability.NullableReturnedFromNonnull,optin.cplusplus.UninitializedObject,optin.cplusplus.VirtualCall,optin.mpi.MPI-Checker,optin.osx.OSObjectCStyleCast,optin.osx.cocoa.localizability.EmptyLocalizationContextChecker,optin.osx.cocoa.localizability.NonLocalizedStringChecker,optin.performance.GCDAntipattern,optin.performance.Padding,optin.portability.UnixAPI,security.FloatLoopCounter,security.insecureAPI.DeprecatedOrUnsafeBufferHandling,security.insecureAPI.bcmp,security.insecureAPI.bcopy,security.insecureAPI.bzero,security.insecureAPI.rand,security.insecureAPI.strcpy,valist.CopyToSelf,valist.Uninitialized,valist.Unterminated,webkit.NoUncountedMemberChecker,webkit.RefCntblBaseVirtualDtor,core.CallAndMessage,core.DivideZero,core.NonNullParamChecker,core.NullDereference,core.StackAddressEscape,core.UndefinedBinaryOperatorResult,core.VLASize,core.uninitialized.ArraySubscript,core.uninitialized.Assign,core.uninitialized.Branch,core.uninitialized.CapturedBlockVariable,core.uninitialized.UndefReturn,cplusplus.InnerPointer,cplusplus.Move,cplusplus.NewDelete,cplusplus.NewDeleteLeaks,cplusplus.PlacementNew,cplusplus.PureVirtualCall,deadcode.DeadStores,nullability.NullPassedToNonnull,nullability.NullReturnedFromNonnull,osx.API,osx.MIG,osx.NumberObjectConversion,osx.OSObjectRetainCount,osx.ObjCProperty,osx.SecKeychainAPI,osx.cocoa.AtSync,osx.cocoa.AutoreleaseWrite,osx.cocoa.ClassRelease,osx.cocoa.Dealloc,osx.cocoa.IncompatibleMethodTypes,osx.cocoa.Loops,osx.cocoa.MissingSuperCall,osx.cocoa.NSAutoreleasePool,osx.cocoa.NSError,osx.cocoa.NilArg,osx.cocoa.NonNilReturnValue,osx.cocoa.ObjCGenerics,osx.cocoa.RetainCount,osx.cocoa.RunLoopAutoreleaseLeak,osx.cocoa.SelfInit,osx.cocoa.SuperDealloc,osx.cocoa.UnusedIvars,osx.cocoa.VariadicMethodTypes,osx.coreFoundation.CFError,osx.coreFoundation.CFNumber,osx.coreFoundation.CFRetainRelease,osx.coreFoundation.containers.OutOfBounds,osx.coreFoundation.containers.PointerSizedValues,security.insecureAPI.UncheckedReturn,security.insecureAPI.decodeValueOfObjCType,security.insecureAPI.getpw,security.insecureAPI.gets,security.insecureAPI.mkstemp,security.insecureAPI.mktemp,security.insecureAPI.vfork,unix.API,unix.Malloc,unix.MallocSizeof,unix.MismatchedDeallocator,unix.Vfork,unix.cstring.BadSizeArg,unix.cstring.NullArg") add_custom_target(ci_test_clang_analyze @@ -328,7 +337,20 @@ add_custom_target(ci_pvs_studio COMMENT "Check 
code with PVS Studio" ) +foreach(SRC_FILE ${SRC_FILES}) + string(MD5 filename "${SRC_FILE}") + file(WRITE "${PROJECT_BINARY_DIR}/single/${filename}.cpp" "#include <${SRC_FILE}>\nint main() {}\n") + add_executable(single_${filename} EXCLUDE_FROM_ALL ${PROJECT_BINARY_DIR}/single/${filename}.cpp) + target_include_directories(single_${filename} PRIVATE ${PROJECT_SOURCE_DIR}/include) + target_compile_features(single_${filename} PRIVATE cxx_std_11) + list(APPEND single_binaries single_${filename}) +endforeach() + +add_custom_target(ci_single_binaries + DEPENDS ${single_binaries} +) + add_custom_target(ci_clean - COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_g++ ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio + COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_g++ ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${single_binaries} COMMENT "Clean generated directories" ) From 6f7ffa53d414b65ae64d2ea6b5cb1dc43b8a673d Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Sat, 26 Dec 2020 22:37:29 +0100 Subject: [PATCH 004/113] :recycle: add targets for iwyu and infer --- cmake/ci.cmake | 94 ++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 83 insertions(+), 11 deletions(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 65ad0f5514..05961a4021 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -1,11 +1,23 @@ +############################################################################### +# Needed tools. +############################################################################### + +include(FindPython3) +find_package(Python3 COMPONENTS Interpreter) + find_program(CLANG_TIDY_TOOL NAMES clang-tidy REQUIRED) find_program(CLANG_TOOL NAMES clang++-HEAD clang++-11 clang++ REQUIRED) find_program(CPPCHECK_TOOL NAMES cppcheck REQUIRED) find_program(GCC_TOOL NAMES g++-HEAD g++-11 g++ REQUIRED) +find_program(INFER_TOOL NAMES infer REQUIRED) +find_program(IWYU_TOOL NAMES iwyu_tool.py REQUIRED) find_program(PLOG_CONVERTER_TOOL NAMES plog-converter REQUIRED) find_program(PVS_STUDIO_ANALYZER_TOOL NAMES pvs-studio-analyzer REQUIRED) find_program(SCAN_BUILD_TOOL NAMES scan-build REQUIRED) +# the individual source files +file(GLOB_RECURSE SRC_FILES ${PROJECT_SOURCE_DIR}/include/nlohmann/*.hpp) + set(CLANG_CXXFLAGS "-std=c++11 \ -Werror \ -Weverything \ @@ -295,6 +307,10 @@ add_custom_target(ci_test_clang COMMENT "Compile and test with Clang" ) +############################################################################### +# Sanitizers. +############################################################################### + set(CLANG_CXX_FLAGS_SANITIZER "-g -O0 -fsanitize=address -fsanitize=undefined -fsanitize=integer -fsanitize=nullability -fno-omit-frame-pointer -fno-sanitize-recover=all -fsanitize-recover=unsigned-integer-overflow") add_custom_target(ci_test_clang_sanitizer @@ -304,32 +320,60 @@ add_custom_target(ci_test_clang_sanitizer COMMENT "Compile and test with Clang" ) +############################################################################### +# Check code with Clang Static Analyzer. 
+############################################################################### + set(CLANG_ANALYZER_CHECKS "fuchsia.HandleChecker,nullability.NullableDereferenced,nullability.NullablePassedToNonnull,nullability.NullableReturnedFromNonnull,optin.cplusplus.UninitializedObject,optin.cplusplus.VirtualCall,optin.mpi.MPI-Checker,optin.osx.OSObjectCStyleCast,optin.osx.cocoa.localizability.EmptyLocalizationContextChecker,optin.osx.cocoa.localizability.NonLocalizedStringChecker,optin.performance.GCDAntipattern,optin.performance.Padding,optin.portability.UnixAPI,security.FloatLoopCounter,security.insecureAPI.DeprecatedOrUnsafeBufferHandling,security.insecureAPI.bcmp,security.insecureAPI.bcopy,security.insecureAPI.bzero,security.insecureAPI.rand,security.insecureAPI.strcpy,valist.CopyToSelf,valist.Uninitialized,valist.Unterminated,webkit.NoUncountedMemberChecker,webkit.RefCntblBaseVirtualDtor,core.CallAndMessage,core.DivideZero,core.NonNullParamChecker,core.NullDereference,core.StackAddressEscape,core.UndefinedBinaryOperatorResult,core.VLASize,core.uninitialized.ArraySubscript,core.uninitialized.Assign,core.uninitialized.Branch,core.uninitialized.CapturedBlockVariable,core.uninitialized.UndefReturn,cplusplus.InnerPointer,cplusplus.Move,cplusplus.NewDelete,cplusplus.NewDeleteLeaks,cplusplus.PlacementNew,cplusplus.PureVirtualCall,deadcode.DeadStores,nullability.NullPassedToNonnull,nullability.NullReturnedFromNonnull,osx.API,osx.MIG,osx.NumberObjectConversion,osx.OSObjectRetainCount,osx.ObjCProperty,osx.SecKeychainAPI,osx.cocoa.AtSync,osx.cocoa.AutoreleaseWrite,osx.cocoa.ClassRelease,osx.cocoa.Dealloc,osx.cocoa.IncompatibleMethodTypes,osx.cocoa.Loops,osx.cocoa.MissingSuperCall,osx.cocoa.NSAutoreleasePool,osx.cocoa.NSError,osx.cocoa.NilArg,osx.cocoa.NonNilReturnValue,osx.cocoa.ObjCGenerics,osx.cocoa.RetainCount,osx.cocoa.RunLoopAutoreleaseLeak,osx.cocoa.SelfInit,osx.cocoa.SuperDealloc,osx.cocoa.UnusedIvars,osx.cocoa.VariadicMethodTypes,osx.coreFoundation.CFError,osx.coreFoundation.CFNumber,osx.coreFoundation.CFRetainRelease,osx.coreFoundation.containers.OutOfBounds,osx.coreFoundation.containers.PointerSizedValues,security.insecureAPI.UncheckedReturn,security.insecureAPI.decodeValueOfObjCType,security.insecureAPI.getpw,security.insecureAPI.gets,security.insecureAPI.mkstemp,security.insecureAPI.mktemp,security.insecureAPI.vfork,unix.API,unix.Malloc,unix.MallocSizeof,unix.MismatchedDeallocator,unix.Vfork,unix.cstring.BadSizeArg,unix.cstring.NullArg") -add_custom_target(ci_test_clang_analyze +add_custom_target(ci_clang_analyze COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_analyze -DJSON_BuildTests=ON -GNinja COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_analyze && ${SCAN_BUILD_TOOL} -enable-checker ${CLANG_ANALYZER_CHECKS} --use-c++=${CLANG_TOOL} -analyze-headers -o ${PROJECT_BINARY_DIR}/report ninja - COMMENT "Compile and test with Clang Analyzer" + COMMENT "Check code with Clang Analyzer" ) +############################################################################### +# Check code with Cppcheck. 
+############################################################################### + add_custom_target(ci_cppcheck COMMAND ${CPPCHECK_TOOL} --enable=warning --inline-suppr --inconclusive --force --std=c++11 ${PROJECT_SOURCE_DIR}/single_include/nlohmann/json.hpp --error-exitcode=1 COMMENT "Check code with Cppcheck" ) -file(GLOB_RECURSE SRC_FILES ${PROJECT_SOURCE_DIR}/include/nlohmann/*.hpp) +############################################################################### +# Check code with cpplint. +############################################################################### add_custom_target(ci_cpplint - COMMAND ${CMAKE_SOURCE_DIR}/third_party/cpplint/cpplint.py --filter=-whitespace,-legal,-runtime/references,-runtime/explicit --quiet --recursive ${SRC_FILES} + COMMAND ${Python3_EXECUTABLE} ${CMAKE_SOURCE_DIR}/third_party/cpplint/cpplint.py --filter=-whitespace,-legal,-runtime/references,-runtime/explicit --quiet --recursive ${SRC_FILES} COMMENT "Check code with cpplint" ) +############################################################################### +# Check code with Clang-Tidy. +############################################################################### + add_custom_target(ci_clang_tidy COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_tidy -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON -GNinja -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_CXX_CLANG_TIDY=clang-tidy COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang_tidy COMMENT "Check code with Clang-Tidy" ) +############################################################################### +# Check code with iwyu. +############################################################################### + +add_custom_target(ci_iwyu + COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_iwyu -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON -DCMAKE_EXPORT_COMPILE_COMMANDS=ON + COMMAND cd ${PROJECT_BINARY_DIR}/build_iwyu && ${IWYU_TOOL} -p ${PROJECT_BINARY_DIR}/build_iwyu -j 10 + COMMENT "Check code with iwyu" +) + +############################################################################### +# Check code with PVS-Studio Analyzer . +############################################################################### + add_custom_target(ci_pvs_studio COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_pvs_studio -DJSON_BuildTests=ON -DCMAKE_EXPORT_COMPILE_COMMANDS=ON COMMAND cd ${PROJECT_BINARY_DIR}/build_pvs_studio && ${PVS_STUDIO_ANALYZER_TOOL} analyze -j 10 @@ -337,20 +381,48 @@ add_custom_target(ci_pvs_studio COMMENT "Check code with PVS Studio" ) +############################################################################### +# Check code with Infer static analyzer. +############################################################################### + +add_custom_target(ci_infer + COMMAND mkdir -p ${PROJECT_BINARY_DIR}/build_infer + COMMAND cd ${PROJECT_BINARY_DIR}/build_infer && ${INFER_TOOL} compile -- ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug ${PROJECT_SOURCE_DIR} -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON + COMMAND cd ${PROJECT_BINARY_DIR}/build_infer && ${INFER_TOOL} run -- make -j10 + COMMENT "Check code with Infer" +) + +############################################################################### +# Check if every header in the include folder includes sufficient headers to +# be compiled individually. 
+############################################################################### + foreach(SRC_FILE ${SRC_FILES}) - string(MD5 filename "${SRC_FILE}") - file(WRITE "${PROJECT_BINARY_DIR}/single/${filename}.cpp" "#include <${SRC_FILE}>\nint main() {}\n") - add_executable(single_${filename} EXCLUDE_FROM_ALL ${PROJECT_BINARY_DIR}/single/${filename}.cpp) - target_include_directories(single_${filename} PRIVATE ${PROJECT_SOURCE_DIR}/include) - target_compile_features(single_${filename} PRIVATE cxx_std_11) - list(APPEND single_binaries single_${filename}) + # get relative path of the header file + file(RELATIVE_PATH RELATIVE_SRC_FILE "${PROJECT_SOURCE_DIR}/include/nlohmann" "${SRC_FILE}") + # replace slashes and strip suffix + string(REPLACE "/" "_" RELATIVE_SRC_FILE "${RELATIVE_SRC_FILE}") + string(REPLACE ".hpp" "" RELATIVE_SRC_FILE "${RELATIVE_SRC_FILE}") + # create code file + file(WRITE "${PROJECT_BINARY_DIR}/src_single/${RELATIVE_SRC_FILE}.cpp" "#include \"${SRC_FILE}\"\n\nint main()\n{}\n") + # create executable + add_executable(single_${RELATIVE_SRC_FILE} EXCLUDE_FROM_ALL ${PROJECT_BINARY_DIR}/src_single/${RELATIVE_SRC_FILE}.cpp) + target_include_directories(single_${RELATIVE_SRC_FILE} PRIVATE ${PROJECT_SOURCE_DIR}/include) + target_compile_features(single_${RELATIVE_SRC_FILE} PRIVATE cxx_std_11) + # remember binary for ci_single_binaries target + list(APPEND single_binaries single_${RELATIVE_SRC_FILE}) endforeach() add_custom_target(ci_single_binaries DEPENDS ${single_binaries} + COMMENT "Check if headers are self-contained" ) +############################################################################### +# Clean up all generated files. +############################################################################### + add_custom_target(ci_clean - COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_g++ ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${single_binaries} + COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_g++ ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${PROJECT_BINARY_DIR}/build_infer ${PROJECT_BINARY_DIR}/build_iwyu ${single_binaries} COMMENT "Clean generated directories" ) From 4c6cda363b67934fce91c006a8a133a36cc2caa3 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Sun, 27 Dec 2020 14:09:20 +0100 Subject: [PATCH 005/113] :loud_sound: add version output --- cmake/ci.cmake | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 05961a4021..d914c1634c 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -1,3 +1,8 @@ +# macOS + +# brew install llvm cppcheck viva64/pvs-studio/pvs-studio iwyu +# brew install gcc --HEAD + ############################################################################### # Needed tools. 
############################################################################### @@ -6,10 +11,30 @@ include(FindPython3) find_package(Python3 COMPONENTS Interpreter) find_program(CLANG_TIDY_TOOL NAMES clang-tidy REQUIRED) +execute_process(COMMAND ${CLANG_TIDY_TOOL} --version OUTPUT_VARIABLE CLANG_TIDY_TOOL_VERSION ERROR_VARIABLE CLANG_TIDY_TOOL_VERSION) +string(REGEX MATCH "[0-9]+(\\.[0-9]*)+" CLANG_TIDY_TOOL_VERSION "${CLANG_TIDY_TOOL_VERSION}") +message(STATUS "🔖 Clang-Tidy ${CLANG_TIDY_TOOL_VERSION} (${CLANG_TIDY_TOOL})") + find_program(CLANG_TOOL NAMES clang++-HEAD clang++-11 clang++ REQUIRED) +execute_process(COMMAND ${CLANG_TOOL} --version OUTPUT_VARIABLE CLANG_TOOL_VERSION ERROR_VARIABLE CLANG_TOOL_VERSION) +string(REGEX MATCH "[0-9]+(\\.[0-9]*)+" CLANG_TOOL_VERSION "${CLANG_TOOL_VERSION}") +message(STATUS "🔖 Clang ${CLANG_TOOL_VERSION} (${CLANG_TOOL})") + find_program(CPPCHECK_TOOL NAMES cppcheck REQUIRED) +execute_process(COMMAND ${CPPCHECK_TOOL} --version OUTPUT_VARIABLE CPPCHECK_TOOL_VERSION ERROR_VARIABLE CPPCHECK_TOOL_VERSION) +string(REGEX MATCH "[0-9]+(\\.[0-9]*)+" CPPCHECK_TOOL_VERSION "${CPPCHECK_TOOL_VERSION}") +message(STATUS "🔖 Cppcheck ${CPPCHECK_TOOL_VERSION} (${CPPCHECK_TOOL})") + find_program(GCC_TOOL NAMES g++-HEAD g++-11 g++ REQUIRED) +execute_process(COMMAND ${GCC_TOOL} --version OUTPUT_VARIABLE GCC_TOOL_VERSION ERROR_VARIABLE GCC_TOOL_VERSION) +string(REGEX MATCH "[0-9]+(\\.[0-9]*)+" GCC_TOOL_VERSION "${GCC_TOOL_VERSION}") +message(STATUS "🔖 GCC ${GCC_TOOL_VERSION} (${GCC_TOOL})") + find_program(INFER_TOOL NAMES infer REQUIRED) +execute_process(COMMAND ${INFER_TOOL} --version OUTPUT_VARIABLE INFER_TOOL_VERSION ERROR_VARIABLE INFER_TOOL_VERSION) +string(REGEX MATCH "[0-9]+(\\.[0-9]*)+" INFER_TOOL_VERSION "${INFER_TOOL_VERSION}") +message(STATUS "🔖 Infer ${INFER_TOOL_VERSION} (${INFER_TOOL})") + find_program(IWYU_TOOL NAMES iwyu_tool.py REQUIRED) find_program(PLOG_CONVERTER_TOOL NAMES plog-converter REQUIRED) find_program(PVS_STUDIO_ANALYZER_TOOL NAMES pvs-studio-analyzer REQUIRED) @@ -317,7 +342,7 @@ add_custom_target(ci_test_clang_sanitizer COMMAND CXX=${CLANG_TOOL} CXXFLAGS=${CLANG_CXX_FLAGS_SANITIZER} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_sanitizer -DJSON_BuildTests=ON -GNinja COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang_sanitizer COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_sanitizer/test && ${CMAKE_CTEST_COMMAND} -j10 - COMMENT "Compile and test with Clang" + COMMENT "Compile and test with sanitizers" ) From 942731844034ba6b62faab3e7aed1d3f7cf474f1 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Sun, 27 Dec 2020 15:47:30 +0100 Subject: [PATCH 006/113] :recycle: add target for oclint --- cmake/ci.cmake | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index d914c1634c..ad866b1070 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -1,6 +1,8 @@ # macOS -# brew install llvm cppcheck viva64/pvs-studio/pvs-studio iwyu +# brew install llvm cppcheck iwyu infer oclint/formulae/oclint +# brew install viva64/pvs-studio/pvs-studio +# (you will need credentials) # brew install gcc --HEAD ############################################################################### @@ -12,29 +14,30 @@ find_package(Python3 COMPONENTS Interpreter) find_program(CLANG_TIDY_TOOL NAMES clang-tidy REQUIRED) execute_process(COMMAND 
${CLANG_TIDY_TOOL} --version OUTPUT_VARIABLE CLANG_TIDY_TOOL_VERSION ERROR_VARIABLE CLANG_TIDY_TOOL_VERSION) -string(REGEX MATCH "[0-9]+(\\.[0-9]*)+" CLANG_TIDY_TOOL_VERSION "${CLANG_TIDY_TOOL_VERSION}") +string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" CLANG_TIDY_TOOL_VERSION "${CLANG_TIDY_TOOL_VERSION}") message(STATUS "🔖 Clang-Tidy ${CLANG_TIDY_TOOL_VERSION} (${CLANG_TIDY_TOOL})") find_program(CLANG_TOOL NAMES clang++-HEAD clang++-11 clang++ REQUIRED) execute_process(COMMAND ${CLANG_TOOL} --version OUTPUT_VARIABLE CLANG_TOOL_VERSION ERROR_VARIABLE CLANG_TOOL_VERSION) -string(REGEX MATCH "[0-9]+(\\.[0-9]*)+" CLANG_TOOL_VERSION "${CLANG_TOOL_VERSION}") +string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" CLANG_TOOL_VERSION "${CLANG_TOOL_VERSION}") message(STATUS "🔖 Clang ${CLANG_TOOL_VERSION} (${CLANG_TOOL})") find_program(CPPCHECK_TOOL NAMES cppcheck REQUIRED) execute_process(COMMAND ${CPPCHECK_TOOL} --version OUTPUT_VARIABLE CPPCHECK_TOOL_VERSION ERROR_VARIABLE CPPCHECK_TOOL_VERSION) -string(REGEX MATCH "[0-9]+(\\.[0-9]*)+" CPPCHECK_TOOL_VERSION "${CPPCHECK_TOOL_VERSION}") +string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" CPPCHECK_TOOL_VERSION "${CPPCHECK_TOOL_VERSION}") message(STATUS "🔖 Cppcheck ${CPPCHECK_TOOL_VERSION} (${CPPCHECK_TOOL})") find_program(GCC_TOOL NAMES g++-HEAD g++-11 g++ REQUIRED) execute_process(COMMAND ${GCC_TOOL} --version OUTPUT_VARIABLE GCC_TOOL_VERSION ERROR_VARIABLE GCC_TOOL_VERSION) -string(REGEX MATCH "[0-9]+(\\.[0-9]*)+" GCC_TOOL_VERSION "${GCC_TOOL_VERSION}") +string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" GCC_TOOL_VERSION "${GCC_TOOL_VERSION}") message(STATUS "🔖 GCC ${GCC_TOOL_VERSION} (${GCC_TOOL})") find_program(INFER_TOOL NAMES infer REQUIRED) execute_process(COMMAND ${INFER_TOOL} --version OUTPUT_VARIABLE INFER_TOOL_VERSION ERROR_VARIABLE INFER_TOOL_VERSION) -string(REGEX MATCH "[0-9]+(\\.[0-9]*)+" INFER_TOOL_VERSION "${INFER_TOOL_VERSION}") +string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" INFER_TOOL_VERSION "${INFER_TOOL_VERSION}") message(STATUS "🔖 Infer ${INFER_TOOL_VERSION} (${INFER_TOOL})") +find_program(OCLINT_TOOL NAMES oclint-json-compilation-database REQUIRED) find_program(IWYU_TOOL NAMES iwyu_tool.py REQUIRED) find_program(PLOG_CONVERTER_TOOL NAMES plog-converter REQUIRED) find_program(PVS_STUDIO_ANALYZER_TOOL NAMES pvs-studio-analyzer REQUIRED) @@ -375,6 +378,23 @@ add_custom_target(ci_cpplint COMMENT "Check code with cpplint" ) +############################################################################### +# Check code with OCLint. +############################################################################### + +file(COPY ${PROJECT_SOURCE_DIR}/single_include/nlohmann/json.hpp DESTINATION ${PROJECT_BINARY_DIR}/src_single) +file(RENAME ${PROJECT_BINARY_DIR}/src_single/json.hpp ${PROJECT_BINARY_DIR}/src_single/all.cpp) +file(APPEND "${PROJECT_BINARY_DIR}/src_single/all.cpp" "\n\nint main()\n{}\n") + +add_executable(single_all ${PROJECT_BINARY_DIR}/src_single/all.cpp) +target_compile_features(single_all PRIVATE cxx_std_11) + +add_custom_target(ci_oclint + COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_oclint -DJSON_BuildTests=OFF -DJSON_CI=ON -DCMAKE_EXPORT_COMPILE_COMMANDS=ON + COMMAND ${OCLINT_TOOL} -i ${PROJECT_BINARY_DIR}/src_single/all.cpp -p ${PROJECT_BINARY_DIR}/build_oclint -- -report-type html -enable-global-analysis -o oclint_report.html + COMMENT "Check code with OCLint" +) + ############################################################################### # Check code with Clang-Tidy. 
############################################################################### @@ -448,6 +468,6 @@ add_custom_target(ci_single_binaries ############################################################################### add_custom_target(ci_clean - COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_g++ ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${PROJECT_BINARY_DIR}/build_infer ${PROJECT_BINARY_DIR}/build_iwyu ${single_binaries} + COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_g++ ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${PROJECT_BINARY_DIR}/build_infer ${PROJECT_BINARY_DIR}/build_iwyu ${PROJECT_BINARY_DIR}/build_oclint ${single_binaries} COMMENT "Clean generated directories" ) From 77f24509d4f4ca66320018a8e2597b2ac7b1c50b Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Wed, 30 Dec 2020 13:27:29 +0100 Subject: [PATCH 007/113] :rotating_light: fix warnings --- cmake/ci.cmake | 8 +++----- test/src/unit-regression2.cpp | 2 ++ test/src/unit-udt.cpp | 2 +- test/src/unit-user_defined_input.cpp | 7 +++++++ 4 files changed, 13 insertions(+), 6 deletions(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index ad866b1070..c7d69760da 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -51,12 +51,10 @@ set(CLANG_CXXFLAGS "-std=c++11 \ -Weverything \ -Wno-c++98-compat \ -Wno-c++98-compat-pedantic \ - -Wno-c++2a-compat \ -Wno-deprecated-declarations \ -Wno-documentation-unknown-command \ -Wno-exit-time-destructors \ - -Wno-float-equal \ - -Wno-missing-prototypes \ + -Wno-extra-semi-stmt \ -Wno-padded \ -Wno-range-loop-analysis \ -Wno-switch-enum -Wno-covered-switch-default \ @@ -193,7 +191,7 @@ set(GCC_CXXFLAGS "-std=c++11 \ -Wmismatched-tags \ -Wmissing-attributes \ -Wmissing-braces \ - -Wno-missing-declarations \ + -Wmissing-declarations \ -Wmissing-field-initializers \ -Wmissing-include-dirs \ -Wmissing-profile \ @@ -301,7 +299,7 @@ set(GCC_CXXFLAGS "-std=c++11 \ -Wunused-const-variable=2 \ -Wunused-function \ -Wunused-label \ - -Wno-unused-local-typedefs \ + -Wunused-local-typedefs \ -Wunused-macros \ -Wunused-parameter \ -Wunused-result \ diff --git a/test/src/unit-regression2.cpp b/test/src/unit-regression2.cpp index 1e8c4922a7..107d16b8cb 100644 --- a/test/src/unit-regression2.cpp +++ b/test/src/unit-regression2.cpp @@ -94,12 +94,14 @@ struct Data std::string b {}; }; +void from_json(const json& j, Data& data); void from_json(const json& j, Data& data) { j["a"].get_to(data.a); j["b"].get_to(data.b); } +bool operator==(Data const& lhs, Data const& rhs); bool operator==(Data const& lhs, Data const& rhs) { return lhs.a == rhs.a && lhs.b == rhs.b; diff --git a/test/src/unit-udt.cpp b/test/src/unit-udt.cpp index b237655056..b333d0b65f 100644 --- a/test/src/unit-udt.cpp +++ b/test/src/unit-udt.cpp @@ -199,7 +199,7 @@ template <typename BasicJsonType> static void from_json(const BasicJsonType& j, country& c) { const auto str = j.template get<std::string>(); - static const std::map<std::string, country> m = + const std::map<std::string, country> m = { {"中华人民共和国", country::china}, {"France", country::france}, diff --git a/test/src/unit-user_defined_input.cpp b/test/src/unit-user_defined_input.cpp index 4b84e8e71c..4138460057 100644 --- a/test/src/unit-user_defined_input.cpp +++ b/test/src/unit-user_defined_input.cpp @@ -131,6 +131,13 @@ TEST_CASE("Custom iterator") const char* 
ptr; }; + // avoid -Wunused-local-typedefs + CHECK(std::is_same<MyIterator::difference_type, std::ptrdiff_t>::value); + CHECK(std::is_same<MyIterator::value_type, char>::value); + CHECK(std::is_same<MyIterator::pointer, const char*>::value); + CHECK(std::is_same<MyIterator::reference, const char&>::value); + CHECK(std::is_same<MyIterator::iterator_category, std::input_iterator_tag>::value); + MyIterator begin{raw_data}; MyIterator end{raw_data + strlen(raw_data)}; From 0bee42026fe2f1732cea977044d1c72b2b0e3c14 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Wed, 30 Dec 2020 13:36:19 +0100 Subject: [PATCH 008/113] :recycle: rename targets --- cmake/ci.cmake | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index c7d69760da..8989bd4ef2 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -320,9 +320,9 @@ set(GCC_CXXFLAGS "-std=c++11 \ ") add_custom_target(ci_test_gcc - COMMAND CXX=${GCC_TOOL} CXXFLAGS=${GCC_CXXFLAGS} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_g++ -DJSON_BuildTests=ON -GNinja - COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_g++ - COMMAND cd ${PROJECT_BINARY_DIR}/build_g++/test && ${CMAKE_CTEST_COMMAND} -j10 + COMMAND CXX=${GCC_TOOL} CXXFLAGS=${GCC_CXXFLAGS} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_gcc -DJSON_BuildTests=ON -GNinja + COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_gcc + COMMAND cd ${PROJECT_BINARY_DIR}/build_gcc/test && ${CMAKE_CTEST_COMMAND} -j10 COMMENT "Compile and test with GCC" ) @@ -466,6 +466,6 @@ add_custom_target(ci_single_binaries ############################################################################### add_custom_target(ci_clean - COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_g++ ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${PROJECT_BINARY_DIR}/build_infer ${PROJECT_BINARY_DIR}/build_iwyu ${PROJECT_BINARY_DIR}/build_oclint ${single_binaries} + COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_gcc ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${PROJECT_BINARY_DIR}/build_infer ${PROJECT_BINARY_DIR}/build_iwyu ${PROJECT_BINARY_DIR}/build_oclint ${single_binaries} COMMENT "Clean generated directories" ) From 45ec48b2a1be93e9511ec5135e581ac17b5c20e9 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Wed, 30 Dec 2020 14:20:00 +0100 Subject: [PATCH 009/113] :recycle: use iwyu properly --- cmake/ci.cmake | 21 ++++++++------------- include/nlohmann/ordered_map.hpp | 4 ++++ single_include/nlohmann/json.hpp | 4 ++++ 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 8989bd4ef2..5c9e2e144b 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -37,8 +37,12 @@ execute_process(COMMAND ${INFER_TOOL} --version OUTPUT_VARIABLE INFER_TOOL_VERSI string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" INFER_TOOL_VERSION "${INFER_TOOL_VERSION}") message(STATUS "🔖 Infer ${INFER_TOOL_VERSION} (${INFER_TOOL})") +find_program(IWYU_TOOL NAMES include-what-you-use iwyu REQUIRED) +execute_process(COMMAND ${IWYU_TOOL} --version OUTPUT_VARIABLE IWYU_TOOL_VERSION ERROR_VARIABLE IWYU_TOOL_VERSION) +string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" IWYU_TOOL_VERSION "${IWYU_TOOL_VERSION}") +message(STATUS "🔖 include-what-you-use ${IWYU_TOOL_VERSION} (${IWYU_TOOL})") + find_program(OCLINT_TOOL NAMES oclint-json-compilation-database REQUIRED) -find_program(IWYU_TOOL 
NAMES iwyu_tool.py REQUIRED) find_program(PLOG_CONVERTER_TOOL NAMES plog-converter REQUIRED) find_program(PVS_STUDIO_ANALYZER_TOOL NAMES pvs-studio-analyzer REQUIRED) find_program(SCAN_BUILD_TOOL NAMES scan-build REQUIRED) @@ -403,16 +407,6 @@ add_custom_target(ci_clang_tidy COMMENT "Check code with Clang-Tidy" ) -############################################################################### -# Check code with iwyu. -############################################################################### - -add_custom_target(ci_iwyu - COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_iwyu -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON -DCMAKE_EXPORT_COMPILE_COMMANDS=ON - COMMAND cd ${PROJECT_BINARY_DIR}/build_iwyu && ${IWYU_TOOL} -p ${PROJECT_BINARY_DIR}/build_iwyu -j 10 - COMMENT "Check code with iwyu" -) - ############################################################################### # Check code with PVS-Studio Analyzer . ############################################################################### @@ -447,11 +441,12 @@ foreach(SRC_FILE ${SRC_FILES}) string(REPLACE "/" "_" RELATIVE_SRC_FILE "${RELATIVE_SRC_FILE}") string(REPLACE ".hpp" "" RELATIVE_SRC_FILE "${RELATIVE_SRC_FILE}") # create code file - file(WRITE "${PROJECT_BINARY_DIR}/src_single/${RELATIVE_SRC_FILE}.cpp" "#include \"${SRC_FILE}\"\n\nint main()\n{}\n") + file(WRITE "${PROJECT_BINARY_DIR}/src_single/${RELATIVE_SRC_FILE}.cpp" "#include \"${SRC_FILE}\" // IWYU pragma: keep\n\nint main()\n{}\n") # create executable add_executable(single_${RELATIVE_SRC_FILE} EXCLUDE_FROM_ALL ${PROJECT_BINARY_DIR}/src_single/${RELATIVE_SRC_FILE}.cpp) target_include_directories(single_${RELATIVE_SRC_FILE} PRIVATE ${PROJECT_SOURCE_DIR}/include) target_compile_features(single_${RELATIVE_SRC_FILE} PRIVATE cxx_std_11) + set_property(TARGET single_${RELATIVE_SRC_FILE} PROPERTY CXX_INCLUDE_WHAT_YOU_USE ${IWYU_TOOL}) # remember binary for ci_single_binaries target list(APPEND single_binaries single_${RELATIVE_SRC_FILE}) endforeach() @@ -466,6 +461,6 @@ add_custom_target(ci_single_binaries ############################################################################### add_custom_target(ci_clean - COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_gcc ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${PROJECT_BINARY_DIR}/build_infer ${PROJECT_BINARY_DIR}/build_iwyu ${PROJECT_BINARY_DIR}/build_oclint ${single_binaries} + COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_gcc ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${PROJECT_BINARY_DIR}/build_infer ${PROJECT_BINARY_DIR}/build_oclint ${single_binaries} COMMENT "Clean generated directories" ) diff --git a/include/nlohmann/ordered_map.hpp b/include/nlohmann/ordered_map.hpp index 330677c4d6..cf5f133e70 100644 --- a/include/nlohmann/ordered_map.hpp +++ b/include/nlohmann/ordered_map.hpp @@ -1,7 +1,11 @@ #pragma once #include <functional> // less +#include <initializer_list> // initializer_list +#include <iterator> // input_iterator_tag, iterator_traits #include <memory> // allocator +#include <stdexcept> // for out_of_range +#include <type_traits> // enable_if, is_convertible #include <utility> // pair #include <vector> // vector diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index 8b6344f921..c35995ad73 100644 --- 
a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -16511,7 +16511,11 @@ class serializer #include <functional> // less +#include <initializer_list> // initializer_list +#include <iterator> // input_iterator_tag, iterator_traits #include <memory> // allocator +#include <stdexcept> // for out_of_range +#include <type_traits> // enable_if, is_convertible #include <utility> // pair #include <vector> // vector From 7cb49fa3a404cd4584c1407da306470a96be0ce1 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Fri, 1 Jan 2021 10:47:22 +0100 Subject: [PATCH 010/113] :rotating_light: fix warnings --- .../nlohmann/byte_container_with_subtype.hpp | 24 +- include/nlohmann/detail/input/binary_reader.hpp | 8 +- include/nlohmann/detail/input/json_sax.hpp | 2 +- include/nlohmann/detail/json_pointer.hpp | 24 +- .../nlohmann/detail/output/binary_writer.hpp | 9 +- include/nlohmann/detail/value_t.hpp | 2 +- include/nlohmann/json.hpp | 288 +++++++++--------- 7 files changed, 178 insertions(+), 179 deletions(-) diff --git a/include/nlohmann/byte_container_with_subtype.hpp b/include/nlohmann/byte_container_with_subtype.hpp index ee3ab4011b..df68395a25 100644 --- a/include/nlohmann/byte_container_with_subtype.hpp +++ b/include/nlohmann/byte_container_with_subtype.hpp @@ -73,9 +73,9 @@ class byte_container_with_subtype : public BinaryType @exceptionsafety No-throw guarantee: this member function never throws exceptions. - @sa @ref subtype() -- return the binary subtype - @sa @ref clear_subtype() -- clears the binary subtype - @sa @ref has_subtype() -- returns whether or not the binary value has a + @sa see @ref subtype() -- return the binary subtype + @sa see @ref clear_subtype() -- clears the binary subtype + @sa see @ref has_subtype() -- returns whether or not the binary value has a subtype @since version 3.8.0 @@ -100,9 +100,9 @@ class byte_container_with_subtype : public BinaryType @exceptionsafety No-throw guarantee: this member function never throws exceptions. - @sa @ref set_subtype() -- sets the binary subtype - @sa @ref clear_subtype() -- clears the binary subtype - @sa @ref has_subtype() -- returns whether or not the binary value has a + @sa see @ref set_subtype() -- sets the binary subtype + @sa see @ref clear_subtype() -- clears the binary subtype + @sa see @ref has_subtype() -- returns whether or not the binary value has a subtype @since version 3.8.0 @@ -122,9 +122,9 @@ class byte_container_with_subtype : public BinaryType @exceptionsafety No-throw guarantee: this member function never throws exceptions. - @sa @ref subtype() -- return the binary subtype - @sa @ref set_subtype() -- sets the binary subtype - @sa @ref clear_subtype() -- clears the binary subtype + @sa see @ref subtype() -- return the binary subtype + @sa see @ref set_subtype() -- sets the binary subtype + @sa see @ref clear_subtype() -- clears the binary subtype @since version 3.8.0 */ @@ -145,9 +145,9 @@ class byte_container_with_subtype : public BinaryType @exceptionsafety No-throw guarantee: this member function never throws exceptions. 
- @sa @ref subtype() -- return the binary subtype - @sa @ref set_subtype() -- sets the binary subtype - @sa @ref has_subtype() -- returns whether or not the binary value has a + @sa see @ref subtype() -- return the binary subtype + @sa see @ref set_subtype() -- sets the binary subtype + @sa see @ref has_subtype() -- returns whether or not the binary value has a subtype @since version 3.8.0 diff --git a/include/nlohmann/detail/input/binary_reader.hpp b/include/nlohmann/detail/input/binary_reader.hpp index 806e360306..bfe18a0fbc 100644 --- a/include/nlohmann/detail/input/binary_reader.hpp +++ b/include/nlohmann/detail/input/binary_reader.hpp @@ -89,7 +89,7 @@ class binary_reader @param[in] strict whether to expect the input to be consumed completed @param[in] tag_handler how to treat CBOR tags - @return + @return whether parsing was successful */ JSON_HEDLEY_NON_NULL(3) bool sax_parse(const input_format_t format, @@ -173,7 +173,7 @@ class binary_reader /*! @brief Parses a C-style string from the BSON input. - @param[in, out] result A reference to the string variable where the read + @param[in,out] result A reference to the string variable where the read string is to be stored. @return `true` if the \x00-byte indicating the end of the string was encountered before the EOF; false` indicates an unexpected EOF. @@ -201,7 +201,7 @@ class binary_reader input. @param[in] len The length (including the zero-byte at the end) of the string to be read. - @param[in, out] result A reference to the string variable where the read + @param[in,out] result A reference to the string variable where the read string is to be stored. @tparam NumberType The type of the length @a len @pre len >= 1 @@ -222,7 +222,7 @@ class binary_reader /*! @brief Parses a byte array input of length @a len from the BSON input. @param[in] len The length of the byte array to be read. - @param[in, out] result A reference to the binary variable where the read + @param[in,out] result A reference to the binary variable where the read array is to be stored. @tparam NumberType The type of the length @a len @pre len >= 0 diff --git a/include/nlohmann/detail/input/json_sax.hpp b/include/nlohmann/detail/input/json_sax.hpp index 223acd60eb..c41493d6d2 100644 --- a/include/nlohmann/detail/input/json_sax.hpp +++ b/include/nlohmann/detail/input/json_sax.hpp @@ -156,7 +156,7 @@ class json_sax_dom_parser using binary_t = typename BasicJsonType::binary_t; /*! - @param[in, out] r reference to a JSON value that is manipulated while + @param[in,out] r reference to a JSON value that is manipulated while parsing @param[in] allow_exceptions_ whether parse errors yield exceptions */ diff --git a/include/nlohmann/detail/json_pointer.hpp b/include/nlohmann/detail/json_pointer.hpp index 865376cf18..8c6bda1406 100644 --- a/include/nlohmann/detail/json_pointer.hpp +++ b/include/nlohmann/detail/json_pointer.hpp @@ -87,9 +87,9 @@ class json_pointer @complexity Linear in the length of @a ptr. - @sa @ref operator/=(std::string) to append a reference token - @sa @ref operator/=(std::size_t) to append an array index - @sa @ref operator/(const json_pointer&, const json_pointer&) for a binary operator + @sa see @ref operator/=(std::string) to append a reference token + @sa see @ref operator/=(std::size_t) to append an array index + @sa see @ref operator/(const json_pointer&, const json_pointer&) for a binary operator @since version 3.6.0 */ @@ -111,9 +111,9 @@ class json_pointer @complexity Amortized constant. 
- @sa @ref operator/=(const json_pointer&) to append a JSON pointer - @sa @ref operator/=(std::size_t) to append an array index - @sa @ref operator/(const json_pointer&, std::size_t) for a binary operator + @sa see @ref operator/=(const json_pointer&) to append a JSON pointer + @sa see @ref operator/=(std::size_t) to append an array index + @sa see @ref operator/(const json_pointer&, std::size_t) for a binary operator @since version 3.6.0 */ @@ -133,9 +133,9 @@ class json_pointer @complexity Amortized constant. - @sa @ref operator/=(const json_pointer&) to append a JSON pointer - @sa @ref operator/=(std::string) to append a reference token - @sa @ref operator/(const json_pointer&, std::string) for a binary operator + @sa see @ref operator/=(const json_pointer&) to append a JSON pointer + @sa see @ref operator/=(std::string) to append a reference token + @sa see @ref operator/(const json_pointer&, std::string) for a binary operator @since version 3.6.0 */ @@ -155,7 +155,7 @@ class json_pointer @complexity Linear in the length of @a lhs and @a rhs. - @sa @ref operator/=(const json_pointer&) to append a JSON pointer + @sa see @ref operator/=(const json_pointer&) to append a JSON pointer @since version 3.6.0 */ @@ -176,7 +176,7 @@ class json_pointer @complexity Linear in the length of @a ptr. - @sa @ref operator/=(std::string) to append a reference token + @sa see @ref operator/=(std::string) to append a reference token @since version 3.6.0 */ @@ -196,7 +196,7 @@ class json_pointer @complexity Linear in the length of @a ptr. - @sa @ref operator/=(std::size_t) to append an array index + @sa see @ref operator/=(std::size_t) to append an array index @since version 3.6.0 */ diff --git a/include/nlohmann/detail/output/binary_writer.hpp b/include/nlohmann/detail/output/binary_writer.hpp index 0c6185e048..3fe102d442 100644 --- a/include/nlohmann/detail/output/binary_writer.hpp +++ b/include/nlohmann/detail/output/binary_writer.hpp @@ -1151,7 +1151,6 @@ class binary_writer key @a name. @param name The name to associate with the JSON entity @a j within the current BSON document - @return The size of the BSON entry */ void write_bson_element(const string_t& name, const BasicJsonType& j) @@ -1196,8 +1195,8 @@ class binary_writer /*! @brief Calculates the size of the BSON serialization of the given JSON-object @a j. - @param[in] j JSON value to serialize - @pre j.type() == value_t::object + @param[in] value JSON value to serialize + @pre value.type() == value_t::object */ static std::size_t calc_bson_object_size(const typename BasicJsonType::object_t& value) { @@ -1211,8 +1210,8 @@ class binary_writer } /*! - @param[in] j JSON value to serialize - @pre j.type() == value_t::object + @param[in] value JSON value to serialize + @pre value.type() == value_t::object */ void write_bson_object(const typename BasicJsonType::object_t& value) { diff --git a/include/nlohmann/detail/value_t.hpp b/include/nlohmann/detail/value_t.hpp index 0383df06f8..a98c4355a0 100644 --- a/include/nlohmann/detail/value_t.hpp +++ b/include/nlohmann/detail/value_t.hpp @@ -32,7 +32,7 @@ number_float), because the library distinguishes these three types for numbers: @ref basic_json::number_float_t is used for floating-point numbers or to approximate integers which do not fit in the limits of their respective type. 
-@sa @ref basic_json::basic_json(const value_t value_type) -- create a JSON +@sa see @ref basic_json::basic_json(const value_t value_type) -- create a JSON value with the default value for a given type @since version 1.0.0 diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp index 8c9bef03df..45390edaf3 100644 --- a/include/nlohmann/json.hpp +++ b/include/nlohmann/json.hpp @@ -491,7 +491,7 @@ class basic_json access to object values, a pointer of type `object_t*` must be dereferenced. - @sa @ref array_t -- type for an array value + @sa see @ref array_t -- type for an array value @since version 1.0.0 @@ -549,7 +549,7 @@ class basic_json Arrays are stored as pointers in a @ref basic_json type. That is, for any access to array values, a pointer of type `array_t*` must be dereferenced. - @sa @ref object_t -- type for an object value + @sa see @ref object_t -- type for an object value @since version 1.0.0 */ @@ -698,9 +698,9 @@ class basic_json Integer number values are stored directly inside a @ref basic_json type. - @sa @ref number_float_t -- type for number values (floating-point) + @sa see @ref number_float_t -- type for number values (floating-point) - @sa @ref number_unsigned_t -- type for number values (unsigned integer) + @sa see @ref number_unsigned_t -- type for number values (unsigned integer) @since version 1.0.0 */ @@ -770,8 +770,8 @@ class basic_json Integer number values are stored directly inside a @ref basic_json type. - @sa @ref number_float_t -- type for number values (floating-point) - @sa @ref number_integer_t -- type for number values (integer) + @sa see @ref number_float_t -- type for number values (floating-point) + @sa see @ref number_integer_t -- type for number values (integer) @since version 2.0.0 */ @@ -837,9 +837,9 @@ class basic_json Floating-point number values are stored directly inside a @ref basic_json type. - @sa @ref number_integer_t -- type for number values (integer) + @sa see @ref number_integer_t -- type for number values (integer) - @sa @ref number_unsigned_t -- type for number values (unsigned integer) + @sa see @ref number_unsigned_t -- type for number values (unsigned integer) @since version 1.0.0 */ @@ -910,7 +910,7 @@ class basic_json - If a subtype is given, it is used and added as unsigned 8-bit integer. - If no subtype is given, the generic binary subtype 0x00 is used. - @sa @ref binary -- create a binary array + @sa see @ref binary -- create a binary array @since version 3.8.0 */ @@ -1254,7 +1254,7 @@ class basic_json @image html callback_events.png "Example when certain parse events are triggered" - @sa @ref parser_callback_t for more information and examples + @sa see @ref parser_callback_t for more information and examples */ using parse_event_t = detail::parse_event_t; @@ -1303,7 +1303,7 @@ class basic_json should be kept (`true`) or not (`false`). In the latter case, it is either skipped completely or replaced by an empty discarded object. 
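Since the parser_callback_t wording is being touched here, a brief sketch of a callback that discards elements during parsing; the callback signature follows the doc comment, and the filtered key name is invented for illustration:

    #include <cassert>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        // discard one (hypothetical) key, keep everything else
        json::parser_callback_t cb = [](int /*depth*/, json::parse_event_t event, json& parsed)
        {
            return !(event == json::parse_event_t::key && parsed == json("secret"));
        };

        json j = json::parse(R"({"public": 1, "secret": 2})", cb);
        assert(j == json({{"public", 1}}));   // the skipped key never reaches the result
    }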
- @sa @ref parse for examples + @sa see @ref parse for examples @since version 1.0.0 */ @@ -1344,7 +1344,7 @@ class basic_json @liveexample{The following code shows the constructor for different @ref value_t values,basic_json__value_t} - @sa @ref clear() -- restores the postcondition of this constructor + @sa see @ref clear() -- restores the postcondition of this constructor @since version 1.0.0 */ @@ -1598,9 +1598,9 @@ class basic_json @liveexample{The example below shows how JSON values are created from initializer lists.,basic_json__list_init_t} - @sa @ref array(initializer_list_t) -- create a JSON array + @sa see @ref array(initializer_list_t) -- create a JSON array value from an initializer list - @sa @ref object(initializer_list_t) -- create a JSON object + @sa see @ref object(initializer_list_t) -- create a JSON object value from an initializer list @since version 1.0.0 @@ -1780,9 +1780,9 @@ class basic_json @liveexample{The following code shows an example for the `array` function.,array} - @sa @ref basic_json(initializer_list_t, bool, value_t) -- + @sa see @ref basic_json(initializer_list_t, bool, value_t) -- create a JSON value from an initializer list - @sa @ref object(initializer_list_t) -- create a JSON object + @sa see @ref object(initializer_list_t) -- create a JSON object value from an initializer list @since version 1.0.0 @@ -1824,9 +1824,9 @@ class basic_json @liveexample{The following code shows an example for the `object` function.,object} - @sa @ref basic_json(initializer_list_t, bool, value_t) -- + @sa see @ref basic_json(initializer_list_t, bool, value_t) -- create a JSON value from an initializer list - @sa @ref array(initializer_list_t) -- create a JSON array + @sa see @ref array(initializer_list_t) -- create a JSON array value from an initializer list @since version 1.0.0 @@ -2325,8 +2325,8 @@ class basic_json @liveexample{The following code exemplifies `type()` for all JSON types.,type} - @sa @ref operator value_t() -- return the type of the JSON value (implicit) - @sa @ref type_name() -- return the type as string + @sa see @ref operator value_t() -- return the type of the JSON value (implicit) + @sa see @ref type_name() -- return the type as string @since version 1.0.0 */ @@ -2352,12 +2352,12 @@ class basic_json @liveexample{The following code exemplifies `is_primitive()` for all JSON types.,is_primitive} - @sa @ref is_structured() -- returns whether JSON value is structured - @sa @ref is_null() -- returns whether JSON value is `null` - @sa @ref is_string() -- returns whether JSON value is a string - @sa @ref is_boolean() -- returns whether JSON value is a boolean - @sa @ref is_number() -- returns whether JSON value is a number - @sa @ref is_binary() -- returns whether JSON value is a binary array + @sa see @ref is_structured() -- returns whether JSON value is structured + @sa see @ref is_null() -- returns whether JSON value is `null` + @sa see @ref is_string() -- returns whether JSON value is a string + @sa see @ref is_boolean() -- returns whether JSON value is a boolean + @sa see @ref is_number() -- returns whether JSON value is a number + @sa see @ref is_binary() -- returns whether JSON value is a binary array @since version 1.0.0 */ @@ -2382,9 +2382,9 @@ class basic_json @liveexample{The following code exemplifies `is_structured()` for all JSON types.,is_structured} - @sa @ref is_primitive() -- returns whether value is primitive - @sa @ref is_array() -- returns whether value is an array - @sa @ref is_object() -- returns whether value is an object + @sa see 
@ref is_primitive() -- returns whether value is primitive + @sa see @ref is_array() -- returns whether value is an array + @sa see @ref is_object() -- returns whether value is an object @since version 1.0.0 */ @@ -2454,11 +2454,11 @@ class basic_json @liveexample{The following code exemplifies `is_number()` for all JSON types.,is_number} - @sa @ref is_number_integer() -- check if value is an integer or unsigned + @sa see @ref is_number_integer() -- check if value is an integer or unsigned integer number - @sa @ref is_number_unsigned() -- check if value is an unsigned integer + @sa see @ref is_number_unsigned() -- check if value is an unsigned integer number - @sa @ref is_number_float() -- check if value is a floating-point number + @sa see @ref is_number_float() -- check if value is a floating-point number @since version 1.0.0 */ @@ -2484,10 +2484,10 @@ class basic_json @liveexample{The following code exemplifies `is_number_integer()` for all JSON types.,is_number_integer} - @sa @ref is_number() -- check if value is a number - @sa @ref is_number_unsigned() -- check if value is an unsigned integer + @sa see @ref is_number() -- check if value is a number + @sa see @ref is_number_unsigned() -- check if value is an unsigned integer number - @sa @ref is_number_float() -- check if value is a floating-point number + @sa see @ref is_number_float() -- check if value is a floating-point number @since version 1.0.0 */ @@ -2512,10 +2512,10 @@ class basic_json @liveexample{The following code exemplifies `is_number_unsigned()` for all JSON types.,is_number_unsigned} - @sa @ref is_number() -- check if value is a number - @sa @ref is_number_integer() -- check if value is an integer or unsigned + @sa see @ref is_number() -- check if value is a number + @sa see @ref is_number_integer() -- check if value is an integer or unsigned integer number - @sa @ref is_number_float() -- check if value is a floating-point number + @sa see @ref is_number_float() -- check if value is a floating-point number @since version 2.0.0 */ @@ -2540,9 +2540,9 @@ class basic_json @liveexample{The following code exemplifies `is_number_float()` for all JSON types.,is_number_float} - @sa @ref is_number() -- check if value is number - @sa @ref is_number_integer() -- check if value is an integer number - @sa @ref is_number_unsigned() -- check if value is an unsigned integer + @sa see @ref is_number() -- check if value is number + @sa see @ref is_number_integer() -- check if value is an integer number + @sa see @ref is_number_unsigned() -- check if value is an unsigned integer number @since version 1.0.0 @@ -2683,8 +2683,8 @@ class basic_json @liveexample{The following code exemplifies the @ref value_t operator for all JSON types.,operator__value_t} - @sa @ref type() -- return the type of the JSON value (explicit) - @sa @ref type_name() -- return the type as string + @sa see @ref type() -- return the type of the JSON value (explicit) + @sa see @ref type_name() -- return the type as string @since version 1.0.0 */ @@ -3127,7 +3127,7 @@ class basic_json `nullptr` is returned if the value and the requested pointer type does not match.,get__PointerType} - @sa @ref get_ptr() for explicit pointer-member access + @sa see @ref get_ptr() for explicit pointer-member access @since version 1.0.0 */ @@ -3249,7 +3249,7 @@ class basic_json @throw type_error.302 if the value is not binary - @sa @ref is_binary() to check if the value is binary + @sa see @ref is_binary() to check if the value is binary @since version 3.8.0 */ @@ -3399,9 +3399,9 @@ class 
basic_json @complexity Logarithmic in the size of the container. - @sa @ref operator[](const typename object_t::key_type&) for unchecked + @sa see @ref operator[](const typename object_t::key_type&) for unchecked access by reference - @sa @ref value() for access by value with a default value + @sa see @ref value() for access by value with a default value @since version 1.0.0 @@ -3450,9 +3450,9 @@ class basic_json @complexity Logarithmic in the size of the container. - @sa @ref operator[](const typename object_t::key_type&) for unchecked + @sa see @ref operator[](const typename object_t::key_type&) for unchecked access by reference - @sa @ref value() for access by value with a default value + @sa see @ref value() for access by value with a default value @since version 1.0.0 @@ -3584,9 +3584,9 @@ class basic_json @liveexample{The example below shows how object elements can be read and written using the `[]` operator.,operatorarray__key_type} - @sa @ref at(const typename object_t::key_type&) for access by reference + @sa see @ref at(const typename object_t::key_type&) for access by reference with range checking - @sa @ref value() for access by value with a default value + @sa see @ref value() for access by value with a default value @since version 1.0.0 */ @@ -3633,9 +3633,9 @@ class basic_json @liveexample{The example below shows how object elements can be read using the `[]` operator.,operatorarray__key_type_const} - @sa @ref at(const typename object_t::key_type&) for access by reference + @sa see @ref at(const typename object_t::key_type&) for access by reference with range checking - @sa @ref value() for access by value with a default value + @sa see @ref value() for access by value with a default value @since version 1.0.0 */ @@ -3672,9 +3672,9 @@ class basic_json @liveexample{The example below shows how object elements can be read and written using the `[]` operator.,operatorarray__key_type} - @sa @ref at(const typename object_t::key_type&) for access by reference + @sa see @ref at(const typename object_t::key_type&) for access by reference with range checking - @sa @ref value() for access by value with a default value + @sa see @ref value() for access by value with a default value @since version 1.1.0 */ @@ -3723,9 +3723,9 @@ class basic_json @liveexample{The example below shows how object elements can be read using the `[]` operator.,operatorarray__key_type_const} - @sa @ref at(const typename object_t::key_type&) for access by reference + @sa see @ref at(const typename object_t::key_type&) for access by reference with range checking - @sa @ref value() for access by value with a default value + @sa see @ref value() for access by value with a default value @since version 1.1.0 */ @@ -3786,9 +3786,9 @@ class basic_json @liveexample{The example below shows how object elements can be queried with a default value.,basic_json__value} - @sa @ref at(const typename object_t::key_type&) for access by reference + @sa see @ref at(const typename object_t::key_type&) for access by reference with range checking - @sa @ref operator[](const typename object_t::key_type&) for unchecked + @sa see @ref operator[](const typename object_t::key_type&) for unchecked access by reference @since version 1.0.0 @@ -3863,7 +3863,7 @@ class basic_json @liveexample{The example below shows how object elements can be queried with a default value.,basic_json__value_ptr} - @sa @ref operator[](const json_pointer&) for unchecked access by reference + @sa see @ref operator[](const json_pointer&) for unchecked access by 
reference @since version 2.0.2 */ @@ -3919,7 +3919,7 @@ class basic_json @liveexample{The following code shows an example for `front()`.,front} - @sa @ref back() -- access the last element + @sa see @ref back() -- access the last element @since version 1.0.0 */ @@ -3963,7 +3963,7 @@ class basic_json @liveexample{The following code shows an example for `back()`.,back} - @sa @ref front() -- access the first element + @sa see @ref front() -- access the first element @since version 1.0.0 */ @@ -4021,11 +4021,11 @@ class basic_json @liveexample{The example shows the result of `erase()` for different JSON types.,erase__IteratorType} - @sa @ref erase(IteratorType, IteratorType) -- removes the elements in + @sa see @ref erase(IteratorType, IteratorType) -- removes the elements in the given range - @sa @ref erase(const typename object_t::key_type&) -- removes the element + @sa see @ref erase(const typename object_t::key_type&) -- removes the element from an object at the given key - @sa @ref erase(const size_type) -- removes the element from an array at + @sa see @ref erase(const size_type) -- removes the element from an array at the given index @since version 1.0.0 @@ -4135,10 +4135,10 @@ class basic_json @liveexample{The example shows the result of `erase()` for different JSON types.,erase__IteratorType_IteratorType} - @sa @ref erase(IteratorType) -- removes the element at a given position - @sa @ref erase(const typename object_t::key_type&) -- removes the element + @sa see @ref erase(IteratorType) -- removes the element at a given position + @sa see @ref erase(const typename object_t::key_type&) -- removes the element from an object at the given key - @sa @ref erase(const size_type) -- removes the element from an array at + @sa see @ref erase(const size_type) -- removes the element from an array at the given index @since version 1.0.0 @@ -4234,10 +4234,10 @@ class basic_json @liveexample{The example shows the effect of `erase()`.,erase__key_type} - @sa @ref erase(IteratorType) -- removes the element at a given position - @sa @ref erase(IteratorType, IteratorType) -- removes the elements in + @sa see @ref erase(IteratorType) -- removes the element at a given position + @sa see @ref erase(IteratorType, IteratorType) -- removes the elements in the given range - @sa @ref erase(const size_type) -- removes the element from an array at + @sa see @ref erase(const size_type) -- removes the element from an array at the given index @since version 1.0.0 @@ -4269,10 +4269,10 @@ class basic_json @liveexample{The example shows the effect of `erase()`.,erase__size_type} - @sa @ref erase(IteratorType) -- removes the element at a given position - @sa @ref erase(IteratorType, IteratorType) -- removes the elements in + @sa see @ref erase(IteratorType) -- removes the element at a given position + @sa see @ref erase(IteratorType, IteratorType) -- removes the elements in the given range - @sa @ref erase(const typename object_t::key_type&) -- removes the element + @sa see @ref erase(const typename object_t::key_type&) -- removes the element from an object at the given key @since version 1.0.0 @@ -4325,7 +4325,7 @@ class basic_json @liveexample{The example shows how `find()` is used.,find__key_type} - @sa @ref contains(KeyT&&) const -- checks whether a key exists + @sa see @ref contains(KeyT&&) const -- checks whether a key exists @since version 1.0.0 */ @@ -4407,8 +4407,8 @@ class basic_json @liveexample{The following code shows an example for `contains()`.,contains} - @sa @ref find(KeyT&&) -- returns an iterator to an 
object element - @sa @ref contains(const json_pointer&) const -- checks the existence for a JSON pointer + @sa see @ref find(KeyT&&) -- returns an iterator to an object element + @sa see @ref contains(const json_pointer&) const -- checks the existence for a JSON pointer @since version 3.6.0 */ @@ -4441,7 +4441,7 @@ class basic_json @liveexample{The following code shows an example for `contains()`.,contains_json_pointer} - @sa @ref contains(KeyT &&) const -- checks the existence of a key + @sa see @ref contains(KeyT &&) const -- checks the existence of a key @since version 3.7.0 */ @@ -4478,9 +4478,9 @@ class basic_json @liveexample{The following code shows an example for `begin()`.,begin} - @sa @ref cbegin() -- returns a const iterator to the beginning - @sa @ref end() -- returns an iterator to the end - @sa @ref cend() -- returns a const iterator to the end + @sa see @ref cbegin() -- returns a const iterator to the beginning + @sa see @ref end() -- returns an iterator to the end + @sa see @ref cend() -- returns a const iterator to the end @since version 1.0.0 */ @@ -4518,9 +4518,9 @@ class basic_json @liveexample{The following code shows an example for `cbegin()`.,cbegin} - @sa @ref begin() -- returns an iterator to the beginning - @sa @ref end() -- returns an iterator to the end - @sa @ref cend() -- returns a const iterator to the end + @sa see @ref begin() -- returns an iterator to the beginning + @sa see @ref end() -- returns an iterator to the end + @sa see @ref cend() -- returns a const iterator to the end @since version 1.0.0 */ @@ -4549,9 +4549,9 @@ class basic_json @liveexample{The following code shows an example for `end()`.,end} - @sa @ref cend() -- returns a const iterator to the end - @sa @ref begin() -- returns an iterator to the beginning - @sa @ref cbegin() -- returns a const iterator to the beginning + @sa see @ref cend() -- returns a const iterator to the end + @sa see @ref begin() -- returns an iterator to the beginning + @sa see @ref cbegin() -- returns a const iterator to the beginning @since version 1.0.0 */ @@ -4589,9 +4589,9 @@ class basic_json @liveexample{The following code shows an example for `cend()`.,cend} - @sa @ref end() -- returns an iterator to the end - @sa @ref begin() -- returns an iterator to the beginning - @sa @ref cbegin() -- returns a const iterator to the beginning + @sa see @ref end() -- returns an iterator to the end + @sa see @ref begin() -- returns an iterator to the beginning + @sa see @ref cbegin() -- returns a const iterator to the beginning @since version 1.0.0 */ @@ -4619,9 +4619,9 @@ class basic_json @liveexample{The following code shows an example for `rbegin()`.,rbegin} - @sa @ref crbegin() -- returns a const reverse iterator to the beginning - @sa @ref rend() -- returns a reverse iterator to the end - @sa @ref crend() -- returns a const reverse iterator to the end + @sa see @ref crbegin() -- returns a const reverse iterator to the beginning + @sa see @ref rend() -- returns a reverse iterator to the end + @sa see @ref crend() -- returns a const reverse iterator to the end @since version 1.0.0 */ @@ -4656,9 +4656,9 @@ class basic_json @liveexample{The following code shows an example for `rend()`.,rend} - @sa @ref crend() -- returns a const reverse iterator to the end - @sa @ref rbegin() -- returns a reverse iterator to the beginning - @sa @ref crbegin() -- returns a const reverse iterator to the beginning + @sa see @ref crend() -- returns a const reverse iterator to the end + @sa see @ref rbegin() -- returns a reverse iterator to the 
beginning + @sa see @ref crbegin() -- returns a const reverse iterator to the beginning @since version 1.0.0 */ @@ -4693,9 +4693,9 @@ class basic_json @liveexample{The following code shows an example for `crbegin()`.,crbegin} - @sa @ref rbegin() -- returns a reverse iterator to the beginning - @sa @ref rend() -- returns a reverse iterator to the end - @sa @ref crend() -- returns a const reverse iterator to the end + @sa see @ref rbegin() -- returns a reverse iterator to the beginning + @sa see @ref rend() -- returns a reverse iterator to the end + @sa see @ref crend() -- returns a const reverse iterator to the end @since version 1.0.0 */ @@ -4722,9 +4722,9 @@ class basic_json @liveexample{The following code shows an example for `crend()`.,crend} - @sa @ref rend() -- returns a reverse iterator to the end - @sa @ref rbegin() -- returns a reverse iterator to the beginning - @sa @ref crbegin() -- returns a const reverse iterator to the beginning + @sa see @ref rend() -- returns a reverse iterator to the end + @sa see @ref rbegin() -- returns a reverse iterator to the beginning + @sa see @ref crbegin() -- returns a const reverse iterator to the beginning @since version 1.0.0 */ @@ -4935,7 +4935,7 @@ class basic_json - The complexity is constant. - Has the semantics of `begin() == end()`. - @sa @ref size() -- returns the number of elements + @sa see @ref size() -- returns the number of elements @since version 1.0.0 */ @@ -5007,8 +5007,8 @@ class basic_json - The complexity is constant. - Has the semantics of `std::distance(begin(), end())`. - @sa @ref empty() -- checks whether the container is empty - @sa @ref max_size() -- returns the maximal number of elements + @sa see @ref empty() -- checks whether the container is empty + @sa see @ref max_size() -- returns the maximal number of elements @since version 1.0.0 */ @@ -5079,7 +5079,7 @@ class basic_json - Has the semantics of returning `b.size()` where `b` is the largest possible JSON value. - @sa @ref size() -- returns the number of elements + @sa see @ref size() -- returns the number of elements @since version 1.0.0 */ @@ -5149,7 +5149,7 @@ class basic_json @exceptionsafety No-throw guarantee: this function never throws exceptions. 
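The clear() documentation around this hunk ties its postcondition to the basic_json(value_t) constructor; a two-step sketch of that relationship (sample contents arbitrary):

    #include <cassert>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json j = {1, 2, 3};
        assert(!j.empty() && j.size() == 3);

        j.clear();   // same result as constructing json(json::value_t::array)
        assert(j.is_array() && j.empty());   // type preserved, contents reset
    }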
- @sa @ref basic_json(value_t) -- constructor that creates an object with the + @sa see @ref basic_json(value_t) -- constructor that creates an object with the same value as calling `clear()` @since version 1.0.0 @@ -6923,8 +6923,8 @@ class basic_json @liveexample{The following code exemplifies `type_name()` for all JSON types.,type_name} - @sa @ref type() -- return the type of the JSON value - @sa @ref operator value_t() -- return the type of the JSON value (implicit) + @sa see @ref type() -- return the type of the JSON value + @sa see @ref operator value_t() -- return the type of the JSON value (implicit) @since version 1.0.0, public since 2.1.0, `const char*` and `noexcept` since 3.0.0 @@ -7060,10 +7060,10 @@ class basic_json vector in CBOR format.,to_cbor} @sa http://cbor.io - @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the + @sa see @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the analogous deserialization - @sa @ref to_msgpack(const basic_json&) for the related MessagePack format - @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the + @sa see @ref to_msgpack(const basic_json&) for the related MessagePack format + @sa see @ref to_ubjson(const basic_json&, const bool, const bool) for the related UBJSON format @since version 2.0.9; compact representation of floating-point numbers @@ -7157,9 +7157,9 @@ class basic_json vector in MessagePack format.,to_msgpack} @sa http://msgpack.org - @sa @ref from_msgpack for the analogous deserialization - @sa @ref to_cbor(const basic_json& for the related CBOR format - @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the + @sa see @ref from_msgpack for the analogous deserialization + @sa see @ref to_cbor(const basic_json&) for the related CBOR format + @sa see @ref to_ubjson(const basic_json&, const bool, const bool) for the related UBJSON format @since version 2.0.9 @@ -7260,10 +7260,10 @@ class basic_json vector in UBJSON format.,to_ubjson} @sa http://ubjson.org - @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the + @sa see @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the analogous deserialization - @sa @ref to_cbor(const basic_json& for the related CBOR format - @sa @ref to_msgpack(const basic_json&) for the related MessagePack format + @sa see @ref to_cbor(const basic_json&) for the related CBOR format + @sa see @ref to_msgpack(const basic_json&) for the related MessagePack format @since version 3.1.0 */ @@ -7338,12 +7338,12 @@ class basic_json vector in BSON format.,to_bson} @sa http://bsonspec.org/spec.html - @sa @ref from_bson(detail::input_adapter&&, const bool strict) for the + @sa see @ref from_bson(detail::input_adapter&&, const bool strict) for the analogous deserialization - @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the + @sa see @ref to_ubjson(const basic_json&, const bool, const bool) for the related UBJSON format - @sa @ref to_cbor(const basic_json&) for the related CBOR format - @sa @ref to_msgpack(const basic_json&) for the related MessagePack format + @sa see @ref to_cbor(const basic_json&) for the related CBOR format + @sa see @ref to_msgpack(const basic_json&) for the related MessagePack format */ static std::vector<std::uint8_t> to_bson(const basic_json& j) { @@ -7358,7 +7358,7 @@ class basic_json @param j The JSON object to convert to BSON. @param o The output adapter that receives the binary BSON representation.
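The to_cbor/from_cbor pair cross-referenced in the hunks above round-trips as follows; a minimal sketch with an arbitrary document:

    #include <cassert>
    #include <cstdint>
    #include <vector>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json j = {{"compact", true}, {"schema", 0}};

        std::vector<std::uint8_t> bytes = json::to_cbor(j);   // serialize to CBOR
        json back = json::from_cbor(bytes);                   // analogous deserialization

        assert(back == j);   // lossless for this document
    }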
@pre The input `j` shall be an object: `j.is_object() == true` - @sa @ref to_bson(const basic_json&) + @sa see @ref to_bson(const basic_json&) */ static void to_bson(const basic_json& j, detail::output_adapter o) { @@ -7465,10 +7465,10 @@ class basic_json format to a JSON value.,from_cbor} @sa http://cbor.io - @sa @ref to_cbor(const basic_json&) for the analogous serialization - @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for the + @sa see @ref to_cbor(const basic_json&) for the analogous serialization + @sa see @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for the related MessagePack format - @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the + @sa see @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the related UBJSON format @since version 2.0.9; parameter @a start_index since 2.1.1; changed to @@ -7606,12 +7606,12 @@ class basic_json MessagePack format to a JSON value.,from_msgpack} @sa http://msgpack.org - @sa @ref to_msgpack(const basic_json&) for the analogous serialization - @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the + @sa see @ref to_msgpack(const basic_json&) for the analogous serialization + @sa see @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the related CBOR format - @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for + @sa see @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the related UBJSON format - @sa @ref from_bson(detail::input_adapter&&, const bool, const bool) for + @sa see @ref from_bson(detail::input_adapter&&, const bool, const bool) for the related BSON format @since version 2.0.9; parameter @a start_index since 2.1.1; changed to @@ -7724,13 +7724,13 @@ class basic_json UBJSON format to a JSON value.,from_ubjson} @sa http://ubjson.org - @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the + @sa see @ref to_ubjson(const basic_json&, const bool, const bool) for the analogous serialization - @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the + @sa see @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the related CBOR format - @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for + @sa see @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for the related MessagePack format - @sa @ref from_bson(detail::input_adapter&&, const bool, const bool) for + @sa see @ref from_bson(detail::input_adapter&&, const bool, const bool) for the related BSON format @since version 3.1.0; added @a allow_exceptions parameter since 3.2.0 @@ -7840,12 +7840,12 @@ class basic_json BSON format to a JSON value.,from_bson} @sa http://bsonspec.org/spec.html - @sa @ref to_bson(const basic_json&) for the analogous serialization - @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the + @sa see @ref to_bson(const basic_json&) for the analogous serialization + @sa see @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the related CBOR format - @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for + @sa see @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for the related MessagePack format - @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the + @sa see @ref 
from_ubjson(detail::input_adapter&&, const bool, const bool) for the related UBJSON format */ template @@ -8078,7 +8078,7 @@ class basic_json @liveexample{The following code shows how a JSON object is flattened to an object whose keys consist of JSON pointers.,flatten} - @sa @ref unflatten() for the reverse function + @sa see @ref unflatten() for the reverse function @since version 2.0.0 */ @@ -8115,7 +8115,7 @@ class basic_json @liveexample{The following code shows how a flattened JSON object is unflattened into the original nested JSON object.,unflatten} - @sa @ref flatten() for the reverse function + @sa see @ref flatten() for the reverse function @since version 2.0.0 */ @@ -8173,7 +8173,7 @@ class basic_json @liveexample{The following code shows how a JSON patch is applied to a value.,patch} - @sa @ref diff -- create a JSON patch by comparing two JSON values + @sa see @ref diff -- create a JSON patch by comparing two JSON values @sa [RFC 6902 (JSON Patch)](https://tools.ietf.org/html/rfc6902) @sa [RFC 6901 (JSON Pointer)](https://tools.ietf.org/html/rfc6901) @@ -8468,8 +8468,8 @@ class basic_json @liveexample{The following code shows how a JSON patch is created as a diff for two JSON values.,diff} - @sa @ref patch -- apply a JSON patch - @sa @ref merge_patch -- apply a JSON Merge Patch + @sa see @ref patch -- apply a JSON patch + @sa see @ref merge_patch -- apply a JSON Merge Patch @sa [RFC 6902 (JSON Patch)](https://tools.ietf.org/html/rfc6902) @@ -8646,7 +8646,7 @@ class basic_json @liveexample{The following code shows how a JSON Merge Patch is applied to a JSON document.,merge_patch} - @sa @ref patch -- apply a JSON patch + @sa see @ref patch -- apply a JSON patch @sa [RFC 7396 (JSON Merge Patch)](https://tools.ietf.org/html/rfc7396) @since version 3.0.0 From 6085beec352c6d452aaac0773f761d8ceb390598 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Fri, 1 Jan 2021 12:51:48 +0100 Subject: [PATCH 011/113] :rotating_light: fix warnings --- include/nlohmann/detail/input/parser.hpp | 1 + include/nlohmann/json.hpp | 2 +- single_include/nlohmann/json.hpp | 360 +++++++++++------------ 3 files changed, 182 insertions(+), 181 deletions(-) diff --git a/include/nlohmann/detail/input/parser.hpp b/include/nlohmann/detail/input/parser.hpp index ffe483aa1e..43e61d9925 100644 --- a/include/nlohmann/detail/input/parser.hpp +++ b/include/nlohmann/detail/input/parser.hpp @@ -497,5 +497,6 @@ class parser /// whether to throw exceptions in case of errors const bool allow_exceptions = true; }; + } // namespace detail } // namespace nlohmann diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp index 45390edaf3..d3b545ad40 100644 --- a/include/nlohmann/json.hpp +++ b/include/nlohmann/json.hpp @@ -2867,7 +2867,7 @@ class basic_json @tparam BasicJsonType == @ref basic_json - @return a copy of *this, converted into @tparam BasicJsonType + @return a copy of *this, converted into @a BasicJsonType @complexity Depending on the implementation of the called `from_json()` method. diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index c35995ad73..72c795bb0c 100644 --- a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -3455,7 +3455,7 @@ number_float), because the library distinguishes these three types for numbers: @ref basic_json::number_float_t is used for floating-point numbers or to approximate integers which do not fit in the limits of their respective type. 
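As a companion to the patch()/diff() documentation earlier in this patch's hunks, a minimal RFC 6902 round trip; both documents are invented:

    #include <cassert>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json source = R"({"a": 1, "b": 2})"_json;
        json target = R"({"a": 1, "b": 3, "c": 4})"_json;

        json p = json::diff(source, target);   // create a JSON Patch (RFC 6902)
        assert(source.patch(p) == target);     // patch() returns a patched copy
    }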
-@sa @ref basic_json::basic_json(const value_t value_type) -- create a JSON +@sa see @ref basic_json::basic_json(const value_t value_type) -- create a JSON value with the default value for a given type @since version 1.0.0 @@ -4568,9 +4568,9 @@ class byte_container_with_subtype : public BinaryType @exceptionsafety No-throw guarantee: this member function never throws exceptions. - @sa @ref subtype() -- return the binary subtype - @sa @ref clear_subtype() -- clears the binary subtype - @sa @ref has_subtype() -- returns whether or not the binary value has a + @sa see @ref subtype() -- return the binary subtype + @sa see @ref clear_subtype() -- clears the binary subtype + @sa see @ref has_subtype() -- returns whether or not the binary value has a subtype @since version 3.8.0 @@ -4595,9 +4595,9 @@ class byte_container_with_subtype : public BinaryType @exceptionsafety No-throw guarantee: this member function never throws exceptions. - @sa @ref set_subtype() -- sets the binary subtype - @sa @ref clear_subtype() -- clears the binary subtype - @sa @ref has_subtype() -- returns whether or not the binary value has a + @sa see @ref set_subtype() -- sets the binary subtype + @sa see @ref clear_subtype() -- clears the binary subtype + @sa see @ref has_subtype() -- returns whether or not the binary value has a subtype @since version 3.8.0 @@ -4617,9 +4617,9 @@ class byte_container_with_subtype : public BinaryType @exceptionsafety No-throw guarantee: this member function never throws exceptions. - @sa @ref subtype() -- return the binary subtype - @sa @ref set_subtype() -- sets the binary subtype - @sa @ref clear_subtype() -- clears the binary subtype + @sa see @ref subtype() -- return the binary subtype + @sa see @ref set_subtype() -- sets the binary subtype + @sa see @ref clear_subtype() -- clears the binary subtype @since version 3.8.0 */ @@ -4640,9 +4640,9 @@ class byte_container_with_subtype : public BinaryType @exceptionsafety No-throw guarantee: this member function never throws exceptions. - @sa @ref subtype() -- return the binary subtype - @sa @ref set_subtype() -- sets the binary subtype - @sa @ref has_subtype() -- returns whether or not the binary value has a + @sa see @ref subtype() -- return the binary subtype + @sa see @ref set_subtype() -- sets the binary subtype + @sa see @ref has_subtype() -- returns whether or not the binary value has a subtype @since version 3.8.0 @@ -5448,7 +5448,7 @@ class json_sax_dom_parser using binary_t = typename BasicJsonType::binary_t; /*! - @param[in, out] r reference to a JSON value that is manipulated while + @param[in,out] r reference to a JSON value that is manipulated while parsing @param[in] allow_exceptions_ whether parse errors yield exceptions */ @@ -7840,7 +7840,7 @@ class binary_reader @param[in] strict whether to expect the input to be consumed completely @param[in] tag_handler how to treat CBOR tags - @return + @return whether parsing was successful */ JSON_HEDLEY_NON_NULL(3) bool sax_parse(const input_format_t format, @@ -7924,7 +7924,7 @@ class binary_reader /*! @brief Parses a C-style string from the BSON input. - @param[in, out] result A reference to the string variable where the read + @param[in,out] result A reference to the string variable where the read string is to be stored. @return `true` if the \x00-byte indicating the end of the string was encountered before the EOF; `false` indicates an unexpected EOF. @@ -7952,7 +7952,7 @@ class binary_reader input.
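The byte_container_with_subtype accessors documented in the hunks above fit together as in this short sketch; the byte values and subtype 42 are arbitrary:

    #include <cassert>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json j = json::binary({0xCA, 0xFE}, 42);   // binary value with subtype 42

        assert(j.get_binary().has_subtype());
        assert(j.get_binary().subtype() == 42);

        j.get_binary().clear_subtype();            // back to "no subtype"
        assert(!j.get_binary().has_subtype());
    }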
@param[in] len The length (including the zero-byte at the end) of the string to be read. - @param[in, out] result A reference to the string variable where the read + @param[in,out] result A reference to the string variable where the read string is to be stored. @tparam NumberType The type of the length @a len @pre len >= 1 @@ -7973,7 +7973,7 @@ class binary_reader /*! @brief Parses a byte array input of length @a len from the BSON input. @param[in] len The length of the byte array to be read. - @param[in, out] result A reference to the binary variable where the read + @param[in,out] result A reference to the binary variable where the read array is to be stored. @tparam NumberType The type of the length @a len @pre len >= 0 @@ -10722,6 +10722,7 @@ class parser /// whether to throw exceptions in case of errors const bool allow_exceptions = true; }; + } // namespace detail } // namespace nlohmann @@ -11745,9 +11746,9 @@ class json_pointer @complexity Linear in the length of @a ptr. - @sa @ref operator/=(std::string) to append a reference token - @sa @ref operator/=(std::size_t) to append an array index - @sa @ref operator/(const json_pointer&, const json_pointer&) for a binary operator + @sa see @ref operator/=(std::string) to append a reference token + @sa see @ref operator/=(std::size_t) to append an array index + @sa see @ref operator/(const json_pointer&, const json_pointer&) for a binary operator @since version 3.6.0 */ @@ -11769,9 +11770,9 @@ class json_pointer @complexity Amortized constant. - @sa @ref operator/=(const json_pointer&) to append a JSON pointer - @sa @ref operator/=(std::size_t) to append an array index - @sa @ref operator/(const json_pointer&, std::size_t) for a binary operator + @sa see @ref operator/=(const json_pointer&) to append a JSON pointer + @sa see @ref operator/=(std::size_t) to append an array index + @sa see @ref operator/(const json_pointer&, std::size_t) for a binary operator @since version 3.6.0 */ @@ -11791,9 +11792,9 @@ class json_pointer @complexity Amortized constant. - @sa @ref operator/=(const json_pointer&) to append a JSON pointer - @sa @ref operator/=(std::string) to append a reference token - @sa @ref operator/(const json_pointer&, std::string) for a binary operator + @sa see @ref operator/=(const json_pointer&) to append a JSON pointer + @sa see @ref operator/=(std::string) to append a reference token + @sa see @ref operator/(const json_pointer&, std::string) for a binary operator @since version 3.6.0 */ @@ -11813,7 +11814,7 @@ class json_pointer @complexity Linear in the length of @a lhs and @a rhs. - @sa @ref operator/=(const json_pointer&) to append a JSON pointer + @sa see @ref operator/=(const json_pointer&) to append a JSON pointer @since version 3.6.0 */ @@ -11834,7 +11835,7 @@ class json_pointer @complexity Linear in the length of @a ptr. - @sa @ref operator/=(std::string) to append a reference token + @sa see @ref operator/=(std::string) to append a reference token @since version 3.6.0 */ @@ -11854,7 +11855,7 @@ class json_pointer @complexity Linear in the length of @a ptr. - @sa @ref operator/=(std::size_t) to append an array index + @sa see @ref operator/=(std::size_t) to append an array index @since version 3.6.0 */ @@ -13994,7 +13995,6 @@ class binary_writer key @a name. @param name The name to associate with the JSON entity @a j within the current BSON document - @return The size of the BSON entry */ void write_bson_element(const string_t& name, const BasicJsonType& j) @@ -14039,8 +14039,8 @@ class binary_writer /*! 
@brief Calculates the size of the BSON serialization of the given JSON-object @a j. - @param[in] j JSON value to serialize - @pre j.type() == value_t::object + @param[in] value JSON value to serialize + @pre value.type() == value_t::object */ static std::size_t calc_bson_object_size(const typename BasicJsonType::object_t& value) { @@ -14054,8 +14054,8 @@ class binary_writer } /*! - @param[in] j JSON value to serialize - @pre j.type() == value_t::object + @param[in] value JSON value to serialize + @pre value.type() == value_t::object */ void write_bson_object(const typename BasicJsonType::object_t& value) { @@ -17119,7 +17119,7 @@ class basic_json access to object values, a pointer of type `object_t*` must be dereferenced. - @sa @ref array_t -- type for an array value + @sa see @ref array_t -- type for an array value @since version 1.0.0 @@ -17177,7 +17177,7 @@ class basic_json Arrays are stored as pointers in a @ref basic_json type. That is, for any access to array values, a pointer of type `array_t*` must be dereferenced. - @sa @ref object_t -- type for an object value + @sa see @ref object_t -- type for an object value @since version 1.0.0 */ @@ -17326,9 +17326,9 @@ class basic_json Integer number values are stored directly inside a @ref basic_json type. - @sa @ref number_float_t -- type for number values (floating-point) + @sa see @ref number_float_t -- type for number values (floating-point) - @sa @ref number_unsigned_t -- type for number values (unsigned integer) + @sa see @ref number_unsigned_t -- type for number values (unsigned integer) @since version 1.0.0 */ @@ -17398,8 +17398,8 @@ class basic_json Integer number values are stored directly inside a @ref basic_json type. - @sa @ref number_float_t -- type for number values (floating-point) - @sa @ref number_integer_t -- type for number values (integer) + @sa see @ref number_float_t -- type for number values (floating-point) + @sa see @ref number_integer_t -- type for number values (integer) @since version 2.0.0 */ @@ -17465,9 +17465,9 @@ class basic_json Floating-point number values are stored directly inside a @ref basic_json type. - @sa @ref number_integer_t -- type for number values (integer) + @sa see @ref number_integer_t -- type for number values (integer) - @sa @ref number_unsigned_t -- type for number values (unsigned integer) + @sa see @ref number_unsigned_t -- type for number values (unsigned integer) @since version 1.0.0 */ @@ -17538,7 +17538,7 @@ class basic_json - If a subtype is given, it is used and added as unsigned 8-bit integer. - If no subtype is given, the generic binary subtype 0x00 is used. - @sa @ref binary -- create a binary array + @sa see @ref binary -- create a binary array @since version 3.8.0 */ @@ -17882,7 +17882,7 @@ class basic_json @image html callback_events.png "Example when certain parse events are triggered" - @sa @ref parser_callback_t for more information and examples + @sa see @ref parser_callback_t for more information and examples */ using parse_event_t = detail::parse_event_t; @@ -17931,7 +17931,7 @@ class basic_json should be kept (`true`) or not (`false`). In the latter case, it is either skipped completely or replaced by an empty discarded object. 
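The array()/object() factories cross-referenced above exist to resolve the brace-initialization ambiguity the surrounding docs describe; a compact sketch with invented contents:

    #include <cassert>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json arr = json::array({1, 2, 3});                    // always an array
        json obj = json::object({{"one", 1}, {"two", 2}});    // always an object

        json guessed = {{"one", 1}};   // list-init: deduced as an object here
        assert(arr.is_array() && obj.is_object() && guessed.is_object());
    }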
- @sa @ref parse for examples + @sa see @ref parse for examples @since version 1.0.0 */ @@ -17972,7 +17972,7 @@ class basic_json @liveexample{The following code shows the constructor for different @ref value_t values,basic_json__value_t} - @sa @ref clear() -- restores the postcondition of this constructor + @sa see @ref clear() -- restores the postcondition of this constructor @since version 1.0.0 */ @@ -18226,9 +18226,9 @@ class basic_json @liveexample{The example below shows how JSON values are created from initializer lists.,basic_json__list_init_t} - @sa @ref array(initializer_list_t) -- create a JSON array + @sa see @ref array(initializer_list_t) -- create a JSON array value from an initializer list - @sa @ref object(initializer_list_t) -- create a JSON object + @sa see @ref object(initializer_list_t) -- create a JSON object value from an initializer list @since version 1.0.0 @@ -18408,9 +18408,9 @@ class basic_json @liveexample{The following code shows an example for the `array` function.,array} - @sa @ref basic_json(initializer_list_t, bool, value_t) -- + @sa see @ref basic_json(initializer_list_t, bool, value_t) -- create a JSON value from an initializer list - @sa @ref object(initializer_list_t) -- create a JSON object + @sa see @ref object(initializer_list_t) -- create a JSON object value from an initializer list @since version 1.0.0 @@ -18452,9 +18452,9 @@ class basic_json @liveexample{The following code shows an example for the `object` function.,object} - @sa @ref basic_json(initializer_list_t, bool, value_t) -- + @sa see @ref basic_json(initializer_list_t, bool, value_t) -- create a JSON value from an initializer list - @sa @ref array(initializer_list_t) -- create a JSON array + @sa see @ref array(initializer_list_t) -- create a JSON array value from an initializer list @since version 1.0.0 @@ -18953,8 +18953,8 @@ class basic_json @liveexample{The following code exemplifies `type()` for all JSON types.,type} - @sa @ref operator value_t() -- return the type of the JSON value (implicit) - @sa @ref type_name() -- return the type as string + @sa see @ref operator value_t() -- return the type of the JSON value (implicit) + @sa see @ref type_name() -- return the type as string @since version 1.0.0 */ @@ -18980,12 +18980,12 @@ class basic_json @liveexample{The following code exemplifies `is_primitive()` for all JSON types.,is_primitive} - @sa @ref is_structured() -- returns whether JSON value is structured - @sa @ref is_null() -- returns whether JSON value is `null` - @sa @ref is_string() -- returns whether JSON value is a string - @sa @ref is_boolean() -- returns whether JSON value is a boolean - @sa @ref is_number() -- returns whether JSON value is a number - @sa @ref is_binary() -- returns whether JSON value is a binary array + @sa see @ref is_structured() -- returns whether JSON value is structured + @sa see @ref is_null() -- returns whether JSON value is `null` + @sa see @ref is_string() -- returns whether JSON value is a string + @sa see @ref is_boolean() -- returns whether JSON value is a boolean + @sa see @ref is_number() -- returns whether JSON value is a number + @sa see @ref is_binary() -- returns whether JSON value is a binary array @since version 1.0.0 */ @@ -19010,9 +19010,9 @@ class basic_json @liveexample{The following code exemplifies `is_structured()` for all JSON types.,is_structured} - @sa @ref is_primitive() -- returns whether value is primitive - @sa @ref is_array() -- returns whether value is an array - @sa @ref is_object() -- returns whether value is an 
object + @sa see @ref is_primitive() -- returns whether value is primitive + @sa see @ref is_array() -- returns whether value is an array + @sa see @ref is_object() -- returns whether value is an object @since version 1.0.0 */ @@ -19082,11 +19082,11 @@ class basic_json @liveexample{The following code exemplifies `is_number()` for all JSON types.,is_number} - @sa @ref is_number_integer() -- check if value is an integer or unsigned + @sa see @ref is_number_integer() -- check if value is an integer or unsigned integer number - @sa @ref is_number_unsigned() -- check if value is an unsigned integer + @sa see @ref is_number_unsigned() -- check if value is an unsigned integer number - @sa @ref is_number_float() -- check if value is a floating-point number + @sa see @ref is_number_float() -- check if value is a floating-point number @since version 1.0.0 */ @@ -19112,10 +19112,10 @@ class basic_json @liveexample{The following code exemplifies `is_number_integer()` for all JSON types.,is_number_integer} - @sa @ref is_number() -- check if value is a number - @sa @ref is_number_unsigned() -- check if value is an unsigned integer + @sa see @ref is_number() -- check if value is a number + @sa see @ref is_number_unsigned() -- check if value is an unsigned integer number - @sa @ref is_number_float() -- check if value is a floating-point number + @sa see @ref is_number_float() -- check if value is a floating-point number @since version 1.0.0 */ @@ -19140,10 +19140,10 @@ class basic_json @liveexample{The following code exemplifies `is_number_unsigned()` for all JSON types.,is_number_unsigned} - @sa @ref is_number() -- check if value is a number - @sa @ref is_number_integer() -- check if value is an integer or unsigned + @sa see @ref is_number() -- check if value is a number + @sa see @ref is_number_integer() -- check if value is an integer or unsigned integer number - @sa @ref is_number_float() -- check if value is a floating-point number + @sa see @ref is_number_float() -- check if value is a floating-point number @since version 2.0.0 */ @@ -19168,9 +19168,9 @@ class basic_json @liveexample{The following code exemplifies `is_number_float()` for all JSON types.,is_number_float} - @sa @ref is_number() -- check if value is number - @sa @ref is_number_integer() -- check if value is an integer number - @sa @ref is_number_unsigned() -- check if value is an unsigned integer + @sa see @ref is_number() -- check if value is number + @sa see @ref is_number_integer() -- check if value is an integer number + @sa see @ref is_number_unsigned() -- check if value is an unsigned integer number @since version 1.0.0 @@ -19311,8 +19311,8 @@ class basic_json @liveexample{The following code exemplifies the @ref value_t operator for all JSON types.,operator__value_t} - @sa @ref type() -- return the type of the JSON value (explicit) - @sa @ref type_name() -- return the type as string + @sa see @ref type() -- return the type of the JSON value (explicit) + @sa see @ref type_name() -- return the type as string @since version 1.0.0 */ @@ -19495,7 +19495,7 @@ class basic_json @tparam BasicJsonType == @ref basic_json - @return a copy of *this, converted into @tparam BasicJsonType + @return a copy of *this, converted into @a BasicJsonType @complexity Depending on the implementation of the called `from_json()` method. 
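A quick sketch tying together the type()/type_name()/is_*() family touched throughout these hunks (sample value arbitrary):

    #include <cassert>
    #include <cstring>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json j = -17;

        assert(j.is_primitive() && j.is_number() && j.is_number_integer());
        assert(!j.is_number_unsigned() && !j.is_number_float());

        assert(j.type() == json::value_t::number_integer);   // explicit type
        assert(std::strcmp(j.type_name(), "number") == 0);   // type as string
    }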
@@ -19755,7 +19755,7 @@ class basic_json
 `nullptr` is returned if the value and the requested pointer type does not match.,get__PointerType}
-    @sa @ref get_ptr() for explicit pointer-member access
+    @sa see @ref get_ptr() for explicit pointer-member access
 @since version 1.0.0
 */
@@ -19877,7 +19877,7 @@ class basic_json
 @throw type_error.302 if the value is not binary
-    @sa @ref is_binary() to check if the value is binary
+    @sa see @ref is_binary() to check if the value is binary
 @since version 3.8.0
 */
@@ -20027,9 +20027,9 @@ class basic_json
 @complexity Logarithmic in the size of the container.
-    @sa @ref operator[](const typename object_t::key_type&) for unchecked
+    @sa see @ref operator[](const typename object_t::key_type&) for unchecked
     access by reference
-    @sa @ref value() for access by value with a default value
+    @sa see @ref value() for access by value with a default value
 @since version 1.0.0
@@ -20078,9 +20078,9 @@ class basic_json
 @complexity Logarithmic in the size of the container.
-    @sa @ref operator[](const typename object_t::key_type&) for unchecked
+    @sa see @ref operator[](const typename object_t::key_type&) for unchecked
     access by reference
-    @sa @ref value() for access by value with a default value
+    @sa see @ref value() for access by value with a default value
 @since version 1.0.0
@@ -20212,9 +20212,9 @@ class basic_json
 @liveexample{The example below shows how object elements can be read and written using the `[]` operator.,operatorarray__key_type}
-    @sa @ref at(const typename object_t::key_type&) for access by reference
+    @sa see @ref at(const typename object_t::key_type&) for access by reference
     with range checking
-    @sa @ref value() for access by value with a default value
+    @sa see @ref value() for access by value with a default value
 @since version 1.0.0
 */
@@ -20261,9 +20261,9 @@ class basic_json
 @liveexample{The example below shows how object elements can be read using the `[]` operator.,operatorarray__key_type_const}
-    @sa @ref at(const typename object_t::key_type&) for access by reference
+    @sa see @ref at(const typename object_t::key_type&) for access by reference
     with range checking
-    @sa @ref value() for access by value with a default value
+    @sa see @ref value() for access by value with a default value
 @since version 1.0.0
 */
@@ -20300,9 +20300,9 @@ class basic_json
 @liveexample{The example below shows how object elements can be read and written using the `[]` operator.,operatorarray__key_type}
-    @sa @ref at(const typename object_t::key_type&) for access by reference
+    @sa see @ref at(const typename object_t::key_type&) for access by reference
     with range checking
-    @sa @ref value() for access by value with a default value
+    @sa see @ref value() for access by value with a default value
 @since version 1.1.0
 */
@@ -20351,9 +20351,9 @@ class basic_json
 @liveexample{The example below shows how object elements can be read using the `[]` operator.,operatorarray__key_type_const}
-    @sa @ref at(const typename object_t::key_type&) for access by reference
+    @sa see @ref at(const typename object_t::key_type&) for access by reference
     with range checking
-    @sa @ref value() for access by value with a default value
+    @sa see @ref value() for access by value with a default value
 @since version 1.1.0
 */
@@ -20414,9 +20414,9 @@ class basic_json
 @liveexample{The example below shows how object elements can be queried with a default value.,basic_json__value}
-    @sa @ref at(const typename object_t::key_type&) for access by reference
+    @sa see @ref at(const typename object_t::key_type&) for access by reference
    with range checking
-    @sa @ref operator[](const typename object_t::key_type&) for unchecked
+    @sa see @ref operator[](const typename object_t::key_type&) for unchecked
    access by reference
 @since version 1.0.0
@@ -20491,7 +20491,7 @@ class basic_json
 @liveexample{The example below shows how object elements can be queried with a default value.,basic_json__value_ptr}
-    @sa @ref operator[](const json_pointer&) for unchecked access by reference
+    @sa see @ref operator[](const json_pointer&) for unchecked access by reference
 @since version 2.0.2
 */
@@ -20547,7 +20547,7 @@ class basic_json
 @liveexample{The following code shows an example for `front()`.,front}
-    @sa @ref back() -- access the last element
+    @sa see @ref back() -- access the last element
 @since version 1.0.0
 */
@@ -20591,7 +20591,7 @@ class basic_json
 @liveexample{The following code shows an example for `back()`.,back}
-    @sa @ref front() -- access the first element
+    @sa see @ref front() -- access the first element
 @since version 1.0.0
 */
@@ -20649,11 +20649,11 @@ class basic_json
 @liveexample{The example shows the result of `erase()` for different JSON types.,erase__IteratorType}
-    @sa @ref erase(IteratorType, IteratorType) -- removes the elements in
+    @sa see @ref erase(IteratorType, IteratorType) -- removes the elements in
    the given range
-    @sa @ref erase(const typename object_t::key_type&) -- removes the element
+    @sa see @ref erase(const typename object_t::key_type&) -- removes the element
    from an object at the given key
-    @sa @ref erase(const size_type) -- removes the element from an array at
+    @sa see @ref erase(const size_type) -- removes the element from an array at
    the given index
 @since version 1.0.0
@@ -20763,10 +20763,10 @@ class basic_json
 @liveexample{The example shows the result of `erase()` for different JSON types.,erase__IteratorType_IteratorType}
-    @sa @ref erase(IteratorType) -- removes the element at a given position
-    @sa @ref erase(const typename object_t::key_type&) -- removes the element
+    @sa see @ref erase(IteratorType) -- removes the element at a given position
+    @sa see @ref erase(const typename object_t::key_type&) -- removes the element
    from an object at the given key
-    @sa @ref erase(const size_type) -- removes the element from an array at
+    @sa see @ref erase(const size_type) -- removes the element from an array at
    the given index
 @since version 1.0.0
@@ -20862,10 +20862,10 @@ class basic_json
 @liveexample{The example shows the effect of `erase()`.,erase__key_type}
-    @sa @ref erase(IteratorType) -- removes the element at a given position
-    @sa @ref erase(IteratorType, IteratorType) -- removes the elements in
+    @sa see @ref erase(IteratorType) -- removes the element at a given position
+    @sa see @ref erase(IteratorType, IteratorType) -- removes the elements in
    the given range
-    @sa @ref erase(const size_type) -- removes the element from an array at
+    @sa see @ref erase(const size_type) -- removes the element from an array at
    the given index
 @since version 1.0.0
@@ -20897,10 +20897,10 @@ class basic_json
 @liveexample{The example shows the effect of `erase()`.,erase__size_type}
-    @sa @ref erase(IteratorType) -- removes the element at a given position
-    @sa @ref erase(IteratorType, IteratorType) -- removes the elements in
+    @sa see @ref erase(IteratorType) -- removes the element at a given position
+    @sa see @ref erase(IteratorType, IteratorType) -- removes the elements in
    the given range
-    @sa @ref erase(const typename object_t::key_type&) -- removes the element
+    @sa see @ref erase(const typename object_t::key_type&) -- removes the element
    from an object at the given key
 @since version 1.0.0
@@ -20953,7 +20953,7 @@ class basic_json
 @liveexample{The example shows how `find()` is used.,find__key_type}
-    @sa @ref contains(KeyT&&) const -- checks whether a key exists
+    @sa see @ref contains(KeyT&&) const -- checks whether a key exists
 @since version 1.0.0
 */
@@ -21035,8 +21035,8 @@ class basic_json
 @liveexample{The following code shows an example for `contains()`.,contains}
-    @sa @ref find(KeyT&&) -- returns an iterator to an object element
-    @sa @ref contains(const json_pointer&) const -- checks the existence for a JSON pointer
+    @sa see @ref find(KeyT&&) -- returns an iterator to an object element
+    @sa see @ref contains(const json_pointer&) const -- checks the existence for a JSON pointer
 @since version 3.6.0
 */
@@ -21069,7 +21069,7 @@ class basic_json
 @liveexample{The following code shows an example for `contains()`.,contains_json_pointer}
-    @sa @ref contains(KeyT &&) const -- checks the existence of a key
+    @sa see @ref contains(KeyT &&) const -- checks the existence of a key
 @since version 3.7.0
 */
@@ -21106,9 +21106,9 @@ class basic_json
 @liveexample{The following code shows an example for `begin()`.,begin}
-    @sa @ref cbegin() -- returns a const iterator to the beginning
-    @sa @ref end() -- returns an iterator to the end
-    @sa @ref cend() -- returns a const iterator to the end
+    @sa see @ref cbegin() -- returns a const iterator to the beginning
+    @sa see @ref end() -- returns an iterator to the end
+    @sa see @ref cend() -- returns a const iterator to the end
 @since version 1.0.0
 */
@@ -21146,9 +21146,9 @@ class basic_json
 @liveexample{The following code shows an example for `cbegin()`.,cbegin}
-    @sa @ref begin() -- returns an iterator to the beginning
-    @sa @ref end() -- returns an iterator to the end
-    @sa @ref cend() -- returns a const iterator to the end
+    @sa see @ref begin() -- returns an iterator to the beginning
+    @sa see @ref end() -- returns an iterator to the end
+    @sa see @ref cend() -- returns a const iterator to the end
 @since version 1.0.0
 */
@@ -21177,9 +21177,9 @@ class basic_json
 @liveexample{The following code shows an example for `end()`.,end}
-    @sa @ref cend() -- returns a const iterator to the end
-    @sa @ref begin() -- returns an iterator to the beginning
-    @sa @ref cbegin() -- returns a const iterator to the beginning
+    @sa see @ref cend() -- returns a const iterator to the end
+    @sa see @ref begin() -- returns an iterator to the beginning
+    @sa see @ref cbegin() -- returns a const iterator to the beginning
 @since version 1.0.0
 */
@@ -21217,9 +21217,9 @@ class basic_json
 @liveexample{The following code shows an example for `cend()`.,cend}
-    @sa @ref end() -- returns an iterator to the end
-    @sa @ref begin() -- returns an iterator to the beginning
-    @sa @ref cbegin() -- returns a const iterator to the beginning
+    @sa see @ref end() -- returns an iterator to the end
+    @sa see @ref begin() -- returns an iterator to the beginning
+    @sa see @ref cbegin() -- returns a const iterator to the beginning
 @since version 1.0.0
 */
@@ -21247,9 +21247,9 @@ class basic_json
 @liveexample{The following code shows an example for `rbegin()`.,rbegin}
-    @sa @ref crbegin() -- returns a const reverse iterator to the beginning
-    @sa @ref rend() -- returns a reverse iterator to the end
-    @sa @ref crend() -- returns a const reverse iterator to the end
+    @sa see @ref crbegin() -- returns a const reverse iterator to the beginning
+    @sa see @ref rend() -- returns a reverse iterator to the end
+    @sa see @ref crend() -- returns a const reverse iterator to the end
 @since version 1.0.0
 */
@@ -21284,9 +21284,9 @@ class basic_json
 @liveexample{The following code shows an example for `rend()`.,rend}
-    @sa @ref crend() -- returns a const reverse iterator to the end
-    @sa @ref rbegin() -- returns a reverse iterator to the beginning
-    @sa @ref crbegin() -- returns a const reverse iterator to the beginning
+    @sa see @ref crend() -- returns a const reverse iterator to the end
+    @sa see @ref rbegin() -- returns a reverse iterator to the beginning
+    @sa see @ref crbegin() -- returns a const reverse iterator to the beginning
 @since version 1.0.0
 */
@@ -21321,9 +21321,9 @@ class basic_json
 @liveexample{The following code shows an example for `crbegin()`.,crbegin}
-    @sa @ref rbegin() -- returns a reverse iterator to the beginning
-    @sa @ref rend() -- returns a reverse iterator to the end
-    @sa @ref crend() -- returns a const reverse iterator to the end
+    @sa see @ref rbegin() -- returns a reverse iterator to the beginning
+    @sa see @ref rend() -- returns a reverse iterator to the end
+    @sa see @ref crend() -- returns a const reverse iterator to the end
 @since version 1.0.0
 */
@@ -21350,9 +21350,9 @@ class basic_json
 @liveexample{The following code shows an example for `crend()`.,crend}
-    @sa @ref rend() -- returns a reverse iterator to the end
-    @sa @ref rbegin() -- returns a reverse iterator to the beginning
-    @sa @ref crbegin() -- returns a const reverse iterator to the beginning
+    @sa see @ref rend() -- returns a reverse iterator to the end
+    @sa see @ref rbegin() -- returns a reverse iterator to the beginning
+    @sa see @ref crbegin() -- returns a const reverse iterator to the beginning
 @since version 1.0.0
 */
@@ -21563,7 +21563,7 @@ class basic_json
 - The complexity is constant.
 - Has the semantics of `begin() == end()`.
-    @sa @ref size() -- returns the number of elements
+    @sa see @ref size() -- returns the number of elements
 @since version 1.0.0
 */
@@ -21635,8 +21635,8 @@ class basic_json
 - The complexity is constant.
 - Has the semantics of `std::distance(begin(), end())`.
-    @sa @ref empty() -- checks whether the container is empty
-    @sa @ref max_size() -- returns the maximal number of elements
+    @sa see @ref empty() -- checks whether the container is empty
+    @sa see @ref max_size() -- returns the maximal number of elements
 @since version 1.0.0
 */
@@ -21707,7 +21707,7 @@ class basic_json
 - Has the semantics of returning `b.size()` where `b` is the largest possible JSON value.
-    @sa @ref size() -- returns the number of elements
+    @sa see @ref size() -- returns the number of elements
 @since version 1.0.0
 */
@@ -21777,7 +21777,7 @@ class basic_json
 @exceptionsafety No-throw guarantee: this function never throws exceptions.
-    @sa @ref basic_json(value_t) -- constructor that creates an object with the
+    @sa see @ref basic_json(value_t) -- constructor that creates an object with the
    same value than calling `clear()`
 @since version 1.0.0
@@ -23551,8 +23551,8 @@ class basic_json
 @liveexample{The following code exemplifies `type_name()` for all JSON types.,type_name}
-    @sa @ref type() -- return the type of the JSON value
-    @sa @ref operator value_t() -- return the type of the JSON value (implicit)
+    @sa see @ref type() -- return the type of the JSON value
+    @sa see @ref operator value_t() -- return the type of the JSON value (implicit)
 @since version 1.0.0, public since 2.1.0, `const char*` and `noexcept` since 3.0.0
@@ -23688,10 +23688,10 @@ class basic_json
 vector in CBOR format.,to_cbor}
 @sa http://cbor.io
-    @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
+    @sa see @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
    analogous deserialization
-    @sa @ref to_msgpack(const basic_json&) for the related MessagePack format
-    @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the
+    @sa see @ref to_msgpack(const basic_json&) for the related MessagePack format
+    @sa see @ref to_ubjson(const basic_json&, const bool, const bool) for the
    related UBJSON format
 @since version 2.0.9; compact representation of floating-point numbers
@@ -23785,9 +23785,9 @@ class basic_json
 vector in MessagePack format.,to_msgpack}
 @sa http://msgpack.org
-    @sa @ref from_msgpack for the analogous deserialization
-    @sa @ref to_cbor(const basic_json& for the related CBOR format
-    @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the
+    @sa see @ref from_msgpack for the analogous deserialization
+    @sa see @ref to_cbor(const basic_json& for the related CBOR format
+    @sa see @ref to_ubjson(const basic_json&, const bool, const bool) for the
    related UBJSON format
 @since version 2.0.9
@@ -23888,10 +23888,10 @@ class basic_json
 vector in UBJSON format.,to_ubjson}
 @sa http://ubjson.org
-    @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
+    @sa see @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
    analogous deserialization
-    @sa @ref to_cbor(const basic_json& for the related CBOR format
-    @sa @ref to_msgpack(const basic_json&) for the related MessagePack format
+    @sa see @ref to_cbor(const basic_json& for the related CBOR format
+    @sa see @ref to_msgpack(const basic_json&) for the related MessagePack format
 @since version 3.1.0
 */
@@ -23966,12 +23966,12 @@ class basic_json
 vector in BSON format.,to_bson}
 @sa http://bsonspec.org/spec.html
-    @sa @ref from_bson(detail::input_adapter&&, const bool strict) for the
+    @sa see @ref from_bson(detail::input_adapter&&, const bool strict) for the
    analogous deserialization
-    @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the
+    @sa see @ref to_ubjson(const basic_json&, const bool, const bool) for the
    related UBJSON format
-    @sa @ref to_cbor(const basic_json&) for the related CBOR format
-    @sa @ref to_msgpack(const basic_json&) for the related MessagePack format
+    @sa see @ref to_cbor(const basic_json&) for the related CBOR format
+    @sa see @ref to_msgpack(const basic_json&) for the related MessagePack format
 */
 static std::vector to_bson(const basic_json& j)
 {
@@ -23986,7 +23986,7 @@ class basic_json
 @param j The JSON object to convert to BSON.
 @param o The output adapter that receives the binary BSON representation.
 @pre The input `j` shall be an object: `j.is_object() == true`
-    @sa @ref to_bson(const basic_json&)
+    @sa see @ref to_bson(const basic_json&)
 */
 static void to_bson(const basic_json& j, detail::output_adapter o)
 {
@@ -24093,10 +24093,10 @@ class basic_json
 format to a JSON value.,from_cbor}
 @sa http://cbor.io
-    @sa @ref to_cbor(const basic_json&) for the analogous serialization
-    @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for the
+    @sa see @ref to_cbor(const basic_json&) for the analogous serialization
+    @sa see @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for the
    related MessagePack format
-    @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
+    @sa see @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
    related UBJSON format
 @since version 2.0.9; parameter @a start_index since 2.1.1; changed to
@@ -24234,12 +24234,12 @@ class basic_json
 MessagePack format to a JSON value.,from_msgpack}
 @sa http://msgpack.org
-    @sa @ref to_msgpack(const basic_json&) for the analogous serialization
-    @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
+    @sa see @ref to_msgpack(const basic_json&) for the analogous serialization
+    @sa see @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
    related CBOR format
-    @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for
+    @sa see @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for
    the related UBJSON format
-    @sa @ref from_bson(detail::input_adapter&&, const bool, const bool) for
+    @sa see @ref from_bson(detail::input_adapter&&, const bool, const bool) for
    the related BSON format
 @since version 2.0.9; parameter @a start_index since 2.1.1; changed to
@@ -24352,13 +24352,13 @@ class basic_json
 UBJSON format to a JSON value.,from_ubjson}
 @sa http://ubjson.org
-    @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the
+    @sa see @ref to_ubjson(const basic_json&, const bool, const bool) for the
    analogous serialization
-    @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
+    @sa see @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
    related CBOR format
-    @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for
+    @sa see @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for
    the related MessagePack format
-    @sa @ref from_bson(detail::input_adapter&&, const bool, const bool) for
+    @sa see @ref from_bson(detail::input_adapter&&, const bool, const bool) for
    the related BSON format
 @since version 3.1.0; added @a allow_exceptions parameter since 3.2.0
@@ -24468,12 +24468,12 @@ class basic_json
 BSON format to a JSON value.,from_bson}
 @sa http://bsonspec.org/spec.html
-    @sa @ref to_bson(const basic_json&) for the analogous serialization
-    @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
+    @sa see @ref to_bson(const basic_json&) for the analogous serialization
+    @sa see @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
    related CBOR format
-    @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for
+    @sa see @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for
    the related MessagePack format
-    @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
+    @sa see @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
    related UBJSON format
 */
 template
@@ -24706,7 +24706,7 @@ class basic_json
 @liveexample{The following code shows how a JSON object is flattened to an object whose keys consist of JSON pointers.,flatten}
-    @sa @ref unflatten() for the reverse function
+    @sa see @ref unflatten() for the reverse function
 @since version 2.0.0
 */
@@ -24743,7 +24743,7 @@ class basic_json
 @liveexample{The following code shows how a flattened JSON object is unflattened into the original nested JSON object.,unflatten}
-    @sa @ref flatten() for the reverse function
+    @sa see @ref flatten() for the reverse function
 @since version 2.0.0
 */
@@ -24801,7 +24801,7 @@ class basic_json
 @liveexample{The following code shows how a JSON patch is applied to a value.,patch}
-    @sa @ref diff -- create a JSON patch by comparing two JSON values
+    @sa see @ref diff -- create a JSON patch by comparing two JSON values
 @sa [RFC 6902 (JSON Patch)](https://tools.ietf.org/html/rfc6902)
 @sa [RFC 6901 (JSON Pointer)](https://tools.ietf.org/html/rfc6901)
@@ -25096,8 +25096,8 @@ class basic_json
 @liveexample{The following code shows how a JSON patch is created as a diff for two JSON values.,diff}
-    @sa @ref patch -- apply a JSON patch
-    @sa @ref merge_patch -- apply a JSON Merge Patch
+    @sa see @ref patch -- apply a JSON patch
+    @sa see @ref merge_patch -- apply a JSON Merge Patch
 @sa [RFC 6902 (JSON Patch)](https://tools.ietf.org/html/rfc6902)
@@ -25274,7 +25274,7 @@ class basic_json
 @liveexample{The following code shows how a JSON Merge Patch is applied to a JSON document.,merge_patch}
-    @sa @ref patch -- apply a JSON patch
+    @sa see @ref patch -- apply a JSON patch
 @sa [RFC 7396 (JSON Merge Patch)](https://tools.ietf.org/html/rfc7396)
 @since version 3.0.0

From 6bfcea39d024c3e2f687c0283bfd680375b1cda8 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Fri, 1 Jan 2021 12:59:55 +0100
Subject: [PATCH 012/113] :rotating_light: fix warnings

---
 include/nlohmann/detail/macro_scope.hpp | 6 ------
 include/nlohmann/detail/macro_unscope.hpp | 3 ---
 single_include/nlohmann/json.hpp | 9 ---------
 3 files changed, 18 deletions(-)

diff --git a/include/nlohmann/detail/macro_scope.hpp b/include/nlohmann/detail/macro_scope.hpp
index 77acf04c76..bd36048f1f 100644
--- a/include/nlohmann/detail/macro_scope.hpp
+++ b/include/nlohmann/detail/macro_scope.hpp
@@ -31,12 +31,6 @@
     #define JSON_HAS_CPP_14
 #endif
-// disable float-equal warnings on GCC/clang
-#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
-    #pragma GCC diagnostic push
-    #pragma GCC diagnostic ignored "-Wfloat-equal"
-#endif
-
 // disable documentation warnings on clang
 #if defined(__clang__)
     #pragma GCC diagnostic push
diff --git a/include/nlohmann/detail/macro_unscope.hpp b/include/nlohmann/detail/macro_unscope.hpp
index 5ac66f5af8..21c091daf9 100644
--- a/include/nlohmann/detail/macro_unscope.hpp
+++ b/include/nlohmann/detail/macro_unscope.hpp
@@ -1,9 +1,6 @@
 #pragma once
 // restore GCC/clang diagnostic settings
-#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
-    #pragma GCC diagnostic pop
-#endif
 #if defined(__clang__)
     #pragma GCC diagnostic pop
 #endif
diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp
index 72c795bb0c..00fc67b654 100644
--- a/single_include/nlohmann/json.hpp
+++ b/single_include/nlohmann/json.hpp
@@ -2108,12 +2108,6 @@ JSON_HEDLEY_DIAGNOSTIC_POP
     #define JSON_HAS_CPP_14
 #endif
-// disable float-equal warnings on GCC/clang
-#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
-    #pragma GCC diagnostic push
-    #pragma GCC diagnostic ignored "-Wfloat-equal"
-#endif
-
 // disable documentation warnings on clang
 #if defined(__clang__)
     #pragma GCC diagnostic push
@@ -25427,9 +25421,6 @@ inline nlohmann::json::json_pointer operator "" _json_pointer(const char* s, std
 // restore GCC/clang diagnostic settings
-#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
-    #pragma GCC diagnostic pop
-#endif
 #if defined(__clang__)
     #pragma GCC diagnostic pop
 #endif

From 8dc3ed11e7ca102b877cbd91882ac5f9aee40ef0 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Fri, 1 Jan 2021 13:08:42 +0100
Subject: [PATCH 013/113] :rotating_light: fix warnings

---
 include/nlohmann/json.hpp | 39 ++++++++++++++++----------------
 single_include/nlohmann/json.hpp | 39 ++++++++++++++++----------------
 2 files changed, 38 insertions(+), 40 deletions(-)

diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp
index d3b545ad40..946a8f87f9 100644
--- a/include/nlohmann/json.hpp
+++ b/include/nlohmann/json.hpp
@@ -1420,8 +1420,7 @@ class basic_json
 - @a CompatibleType is not a different @ref basic_json type (i.e. with different template arguments)
 - @a CompatibleType is not a @ref basic_json nested type (e.g., @ref json_pointer, @ref iterator, etc ...)
-    - @ref @ref json_serializer has a
-      `to_json(basic_json_t&, CompatibleType&&)` method
+    - `json_serializer` has a `to_json(basic_json_t&, CompatibleType&&)` method
 @tparam U = `uncvref_t`
@@ -6049,7 +6048,7 @@ class basic_json
         }
     }
-    /// @copydoc swap(binary_t)
+    /// @copydoc swap(binary_t&)
     void swap(typename binary_t::container_type& other)
     {
         // swap only works for strings
@@ -7060,7 +7059,7 @@ class basic_json
 vector in CBOR format.,to_cbor}
 @sa http://cbor.io
-    @sa see @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
+    @sa see @ref from_cbor(InputType&&, const bool, const bool, const cbor_tag_handler_t) for the
    analogous deserialization
 @sa see @ref to_msgpack(const basic_json&) for the related MessagePack format
 @sa see @ref to_ubjson(const basic_json&, const bool, const bool) for the related UBJSON format
@@ -7260,7 +7259,7 @@ class basic_json
 vector in UBJSON format.,to_ubjson}
 @sa http://ubjson.org
-    @sa see @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
+    @sa see @ref from_ubjson(InputType&&, const bool, const bool) for the
    analogous deserialization
 @sa see @ref to_cbor(const basic_json& for the related CBOR format
 @sa see @ref to_msgpack(const basic_json&) for the related MessagePack format
@@ -7466,9 +7465,9 @@ class basic_json
 @sa http://cbor.io
 @sa see @ref to_cbor(const basic_json&) for the analogous serialization
-    @sa see @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for the
+    @sa see @ref from_msgpack(InputType&&, const bool, const bool) for the
    related MessagePack format
-    @sa see @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
+    @sa see @ref from_ubjson(InputType&&, const bool, const bool) for the
    related UBJSON format
 @since version 2.0.9; parameter @a start_index since 2.1.1; changed to
@@ -7491,7 +7490,7 @@ class basic_json
     }

     /*!
-    @copydoc from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t)
+    @copydoc from_cbor(InputType&&, const bool, const bool, const cbor_tag_handler_t)
     */
     template
     JSON_HEDLEY_WARN_UNUSED_RESULT
@@ -7607,11 +7606,11 @@ class basic_json
 @sa http://msgpack.org
 @sa see @ref to_msgpack(const basic_json&) for the analogous serialization
-    @sa see @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
+    @sa see @ref from_cbor(InputType&&, const bool, const bool, const cbor_tag_handler_t) for the
    related CBOR format
-    @sa see @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for
+    @sa see @ref from_ubjson(InputType&&, const bool, const bool) for
    the related UBJSON format
-    @sa see @ref from_bson(detail::input_adapter&&, const bool, const bool) for
+    @sa see @ref from_bson(InputType&&, const bool, const bool) for
    the related BSON format
 @since version 2.0.9; parameter @a start_index since 2.1.1; changed to
@@ -7633,7 +7632,7 @@ class basic_json
     }

     /*!
-    @copydoc from_msgpack(detail::input_adapter&&, const bool, const bool)
+    @copydoc from_msgpack(InputType&&, const bool, const bool)
     */
     template
     JSON_HEDLEY_WARN_UNUSED_RESULT
@@ -7726,11 +7725,11 @@ class basic_json
 @sa http://ubjson.org
 @sa see @ref to_ubjson(const basic_json&, const bool, const bool) for the analogous serialization
-    @sa see @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
+    @sa see @ref from_cbor(InputType&&, const bool, const bool, const cbor_tag_handler_t) for the
    related CBOR format
-    @sa see @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for
+    @sa see @ref from_msgpack(InputType&&, const bool, const bool) for
    the related MessagePack format
-    @sa see @ref from_bson(detail::input_adapter&&, const bool, const bool) for
+    @sa see @ref from_bson(InputType&&, const bool, const bool) for
    the related BSON format
 @since version 3.1.0; added @a allow_exceptions parameter since 3.2.0
@@ -7749,7 +7748,7 @@ class basic_json
     }

     /*!
-    @copydoc from_ubjson(detail::input_adapter&&, const bool, const bool)
+    @copydoc from_ubjson(InputType&&, const bool, const bool)
     */
     template
     JSON_HEDLEY_WARN_UNUSED_RESULT
@@ -7841,11 +7840,11 @@ class basic_json
 @sa http://bsonspec.org/spec.html
 @sa see @ref to_bson(const basic_json&) for the analogous serialization
-    @sa see @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
+    @sa see @ref from_cbor(InputType&&, const bool, const bool, const cbor_tag_handler_t) for the
    related CBOR format
-    @sa see @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for
+    @sa see @ref from_msgpack(InputType&&, const bool, const bool) for
    the related MessagePack format
-    @sa see @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
+    @sa see @ref from_ubjson(InputType&&, const bool, const bool) for the
    related UBJSON format
 */
 template
@@ -7862,7 +7861,7 @@ class basic_json
     }

     /*!
-    @copydoc from_bson(detail::input_adapter&&, const bool, const bool)
+    @copydoc from_bson(InputType&&, const bool, const bool)
     */
     template
     JSON_HEDLEY_WARN_UNUSED_RESULT
diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp
index 00fc67b654..4ea8e4c71a 100644
--- a/single_include/nlohmann/json.hpp
+++ b/single_include/nlohmann/json.hpp
@@ -18042,8 +18042,7 @@ class basic_json
 - @a CompatibleType is not a different @ref basic_json type (i.e. with different template arguments)
 - @a CompatibleType is not a @ref basic_json nested type (e.g., @ref json_pointer, @ref iterator, etc ...)
-    - @ref @ref json_serializer has a
-      `to_json(basic_json_t&, CompatibleType&&)` method
+    - `json_serializer` has a `to_json(basic_json_t&, CompatibleType&&)` method
 @tparam U = `uncvref_t`
@@ -22671,7 +22670,7 @@ class basic_json
         }
     }
-    /// @copydoc swap(binary_t)
+    /// @copydoc swap(binary_t&)
     void swap(typename binary_t::container_type& other)
     {
         // swap only works for strings
@@ -23682,7 +23681,7 @@ class basic_json
 vector in CBOR format.,to_cbor}
 @sa http://cbor.io
-    @sa see @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
+    @sa see @ref from_cbor(InputType&&, const bool, const bool, const cbor_tag_handler_t) for the
    analogous deserialization
 @sa see @ref to_msgpack(const basic_json&) for the related MessagePack format
 @sa see @ref to_ubjson(const basic_json&, const bool, const bool) for the related UBJSON format
@@ -23882,7 +23881,7 @@ class basic_json
 vector in UBJSON format.,to_ubjson}
 @sa http://ubjson.org
-    @sa see @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
+    @sa see @ref from_ubjson(InputType&&, const bool, const bool) for the
    analogous deserialization
 @sa see @ref to_cbor(const basic_json& for the related CBOR format
 @sa see @ref to_msgpack(const basic_json&) for the related MessagePack format
@@ -24088,9 +24087,9 @@ class basic_json
 @sa http://cbor.io
 @sa see @ref to_cbor(const basic_json&) for the analogous serialization
-    @sa see @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for the
+    @sa see @ref from_msgpack(InputType&&, const bool, const bool) for the
    related MessagePack format
-    @sa see @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
+    @sa see @ref from_ubjson(InputType&&, const bool, const bool) for the
    related UBJSON format
 @since version 2.0.9; parameter @a start_index since 2.1.1; changed to
@@ -24113,7 +24112,7 @@ class basic_json
     }

     /*!
-    @copydoc from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t)
+    @copydoc from_cbor(InputType&&, const bool, const bool, const cbor_tag_handler_t)
     */
     template
     JSON_HEDLEY_WARN_UNUSED_RESULT
@@ -24229,11 +24228,11 @@ class basic_json
 @sa http://msgpack.org
 @sa see @ref to_msgpack(const basic_json&) for the analogous serialization
-    @sa see @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
+    @sa see @ref from_cbor(InputType&&, const bool, const bool, const cbor_tag_handler_t) for the
    related CBOR format
-    @sa see @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for
+    @sa see @ref from_ubjson(InputType&&, const bool, const bool) for
    the related UBJSON format
-    @sa see @ref from_bson(detail::input_adapter&&, const bool, const bool) for
+    @sa see @ref from_bson(InputType&&, const bool, const bool) for
    the related BSON format
 @since version 2.0.9; parameter @a start_index since 2.1.1; changed to
@@ -24255,7 +24254,7 @@ class basic_json
     }

     /*!
-    @copydoc from_msgpack(detail::input_adapter&&, const bool, const bool)
+    @copydoc from_msgpack(InputType&&, const bool, const bool)
     */
     template
     JSON_HEDLEY_WARN_UNUSED_RESULT
@@ -24348,11 +24347,11 @@ class basic_json
 @sa http://ubjson.org
 @sa see @ref to_ubjson(const basic_json&, const bool, const bool) for the analogous serialization
-    @sa see @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
+    @sa see @ref from_cbor(InputType&&, const bool, const bool, const cbor_tag_handler_t) for the
    related CBOR format
-    @sa see @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for
+    @sa see @ref from_msgpack(InputType&&, const bool, const bool) for
    the related MessagePack format
-    @sa see @ref from_bson(detail::input_adapter&&, const bool, const bool) for
+    @sa see @ref from_bson(InputType&&, const bool, const bool) for
    the related BSON format
 @since version 3.1.0; added @a allow_exceptions parameter since 3.2.0
@@ -24371,7 +24370,7 @@ class basic_json
     }

     /*!
-    @copydoc from_ubjson(detail::input_adapter&&, const bool, const bool)
+    @copydoc from_ubjson(InputType&&, const bool, const bool)
     */
     template
     JSON_HEDLEY_WARN_UNUSED_RESULT
@@ -24463,12 +24462,12 @@ class basic_json
 @sa http://bsonspec.org/spec.html
 @sa see @ref to_bson(const basic_json&) for the analogous serialization
-    @sa see @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
+    @sa see @ref from_cbor(InputType&&, const bool, const bool, const cbor_tag_handler_t) for the
    related CBOR format
-    @sa see @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for
+    @sa see @ref from_msgpack(InputType&&, const bool, const bool) for
    the related MessagePack format
-    @sa see @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
+    @sa see @ref from_ubjson(InputType&&, const bool, const bool) for the
    related UBJSON format
 */
 template
@@ -24484,7 +24483,7 @@ class basic_json
     }

     /*!
-    @copydoc from_bson(detail::input_adapter&&, const bool, const bool)
+    @copydoc from_bson(InputType&&, const bool, const bool)
     */
     template
     JSON_HEDLEY_WARN_UNUSED_RESULT

From a6b82cd50b4811419b3c8ff1b044b958a05dba9d Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Fri, 1 Jan 2021 13:08:58 +0100
Subject: [PATCH 014/113] :recycle: use iwyu properly

---
 cmake/ci.cmake | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/cmake/ci.cmake b/cmake/ci.cmake
index 5c9e2e144b..65d4fc7e03 100644
--- a/cmake/ci.cmake
+++ b/cmake/ci.cmake
@@ -331,7 +331,7 @@ add_custom_target(ci_test_gcc
 )

 add_custom_target(ci_test_clang
-    COMMAND CXX=${CLANG_TOOL} CXXFLAGS=${CLANG_CXXFLAGS} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang -DJSON_BuildTests=ON -GNinja
+    COMMAND CXX=${CLANG_TOOL} CXXFLAGS=${CLANG_CXXFLAGS} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON -GNinja
     COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang
     COMMAND cd ${PROJECT_BINARY_DIR}/build_clang/test && ${CMAKE_CTEST_COMMAND} -j10
     COMMENT "Compile and test with Clang"
@@ -434,6 +434,8 @@ add_custom_target(ci_infer
 # be compiled individually.
 ###############################################################################

+set(iwyu_path_and_options ${IWYU_TOOL} -Xiwyu --max_line_length=300)
+
 foreach(SRC_FILE ${SRC_FILES})
     # get relative path of the header file
     file(RELATIVE_PATH RELATIVE_SRC_FILE "${PROJECT_SOURCE_DIR}/include/nlohmann" "${SRC_FILE}")
@@ -446,7 +448,7 @@ foreach(SRC_FILE ${SRC_FILES})
     add_executable(single_${RELATIVE_SRC_FILE} EXCLUDE_FROM_ALL ${PROJECT_BINARY_DIR}/src_single/${RELATIVE_SRC_FILE}.cpp)
     target_include_directories(single_${RELATIVE_SRC_FILE} PRIVATE ${PROJECT_SOURCE_DIR}/include)
     target_compile_features(single_${RELATIVE_SRC_FILE} PRIVATE cxx_std_11)
-    set_property(TARGET single_${RELATIVE_SRC_FILE} PROPERTY CXX_INCLUDE_WHAT_YOU_USE ${IWYU_TOOL})
+    set_property(TARGET single_${RELATIVE_SRC_FILE} PROPERTY CXX_INCLUDE_WHAT_YOU_USE "${iwyu_path_and_options}")
     # remember binary for ci_single_binaries target
     list(APPEND single_binaries single_${RELATIVE_SRC_FILE})
 endforeach()
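The property change in the patch above works because CXX_INCLUDE_WHAT_YOU_USE takes a semicolon-separated command line rather than a bare tool path: the first list element is the executable, the remaining elements are its arguments, and CMake runs that command alongside every compile of the target. A minimal sketch of the mechanism, with a hypothetical target name and tool path (not part of the patch):

    # 'demo' and the absolute tool path are placeholders
    set(iwyu_cmd /usr/bin/include-what-you-use -Xiwyu --max_line_length=300)
    add_executable(demo main.cpp)
    # quote the variable so the whole list (tool;arg;arg) reaches the property intact
    set_property(TARGET demo PROPERTY CXX_INCLUDE_WHAT_YOU_USE "${iwyu_cmd}")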
From 93fad232c5714afcc26999f59020f4ddca0f9f99 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 3 Jan 2021 20:44:22 +0100
Subject: [PATCH 015/113] :recycle: add target for benchmarks

---
 cmake/ci.cmake | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/cmake/ci.cmake b/cmake/ci.cmake
index 65d4fc7e03..3485a4fde8 100644
--- a/cmake/ci.cmake
+++ b/cmake/ci.cmake
@@ -458,11 +458,22 @@ add_custom_target(ci_single_binaries
     COMMENT "Check if headers are self-contained"
 )

+###############################################################################
+# Benchmarks
+###############################################################################
+
+add_custom_target(ci_benchmarks
+    COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Release -S${PROJECT_SOURCE_DIR}/benchmarks -B${PROJECT_BINARY_DIR}/build_benchmarks
+    COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_benchmarks --target json_benchmarks
+    COMMAND cd ${PROJECT_BINARY_DIR}/build_benchmarks && ./json_benchmarks
+    COMMENT "Run benchmarks"
+)
+
 ###############################################################################
 # Clean up all generated files.
 ###############################################################################

 add_custom_target(ci_clean
-    COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_gcc ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${PROJECT_BINARY_DIR}/build_infer ${PROJECT_BINARY_DIR}/build_oclint ${single_binaries}
+    COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_gcc ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${PROJECT_BINARY_DIR}/build_infer ${PROJECT_BINARY_DIR}/build_oclint ${PROJECT_BINARY_DIR}/build_benchmarks ${single_binaries}
     COMMENT "Clean generated directories"
 )
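The new target simply shells out to CMake itself: it configures the benchmarks subproject in Release mode in a separate build tree, builds the json_benchmarks executable, and runs it. Judging from the workflow invocations later in this series, a local run would presumably look like `cmake -S . -B build -DJSON_CI=ON` followed by `cmake --build build --target ci_benchmarks`.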
From dc0a921a4775948b8d0b0738b41c40b147b13dc2 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 3 Jan 2021 21:58:30 +0100
Subject: [PATCH 016/113] :recycle: add target for CMake flags

---
 cmake/ci.cmake | 38 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 37 insertions(+), 1 deletion(-)

diff --git a/cmake/ci.cmake b/cmake/ci.cmake
index 3485a4fde8..9cec0ed94e 100644
--- a/cmake/ci.cmake
+++ b/cmake/ci.cmake
@@ -469,11 +469,47 @@ add_custom_target(ci_benchmarks
     COMMENT "Run benchmarks"
 )

+###############################################################################
+# CMake flags
+###############################################################################
+
+add_custom_command(
+    OUTPUT cmake-3.1.0-Darwin64
+    COMMAND wget https://github.com/Kitware/CMake/releases/download/v3.1.0/cmake-3.1.0-Darwin64.tar.gz
+    COMMAND tar xfz cmake-3.1.0-Darwin64.tar.gz
+    COMMAND rm cmake-3.1.0-Darwin64.tar.gz
+    WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
+    COMMENT "Download CMake 3.1.0"
+)
+
+set(JSON_CMAKE_FLAGS "JSON_BuildTests;JSON_Install;JSON_MultipleHeaders;JSON_Sanitizer;JSON_Valgrind;JSON_NoExceptions;JSON_Coverage")
+
+foreach(JSON_CMAKE_FLAG ${JSON_CMAKE_FLAGS})
+    string(TOLOWER "ci_cmake_flag_${JSON_CMAKE_FLAG}" JSON_CMAKE_FLAG_TARGET)
+    add_custom_target("${JSON_CMAKE_FLAG_TARGET}"
+        COMMENT "Check CMake flag ${JSON_CMAKE_FLAG} (CMake ${CMAKE_VERSION})"
+        COMMAND ${CMAKE_COMMAND} -Werror=dev -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET} -D${JSON_CMAKE_FLAG}=ON
+    )
+    add_custom_target("${JSON_CMAKE_FLAG_TARGET}_31"
+        COMMENT "Check CMake flag ${JSON_CMAKE_FLAG} (CMake 3.1)"
+        COMMAND mkdir ${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET}_31
+        COMMAND cd ${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET}_31 && ${PROJECT_BINARY_DIR}/cmake-3.1.0-Darwin64/CMake.app/Contents/bin/cmake -Werror=dev ${PROJECT_SOURCE_DIR} -D${JSON_CMAKE_FLAG}=ON -DCMAKE_CXX_COMPILE_FEATURES="cxx_range_for" -DCMAKE_CXX_FLAGS="-std=gnu++11"
+        DEPENDS cmake-3.1.0-Darwin64
+    )
+    list(APPEND JSON_CMAKE_FLAG_TARGETS ${JSON_CMAKE_FLAG_TARGET} ${JSON_CMAKE_FLAG_TARGET}_31)
+    list(APPEND JSON_CMAKE_FLAG_BUILD_DIRS ${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET} ${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET}_31)
+endforeach()
+
+add_custom_target(ci_cmake_flags
+    DEPENDS ${JSON_CMAKE_FLAG_TARGETS}
+    COMMENT "Check CMake flags"
+)
+
 ###############################################################################
 # Clean up all generated files.
 ###############################################################################

 add_custom_target(ci_clean
-    COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_gcc ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${PROJECT_BINARY_DIR}/build_infer ${PROJECT_BINARY_DIR}/build_oclint ${PROJECT_BINARY_DIR}/build_benchmarks ${single_binaries}
+    COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_gcc ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${PROJECT_BINARY_DIR}/build_infer ${PROJECT_BINARY_DIR}/build_oclint ${PROJECT_BINARY_DIR}/build_benchmarks cmake-3.1.0-Darwin64 ${JSON_CMAKE_FLAG_BUILD_DIRS} ${single_binaries}
     COMMENT "Clean generated directories"
 )

From 4a56f12cf41a19e1ed665eb08c2c9341fb7e3d91 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 17 Jan 2021 14:37:38 +0100
Subject: [PATCH 017/113] :construction_worker: use GitHub Actions

---
 .github/workflows/ubuntu.yml | 40 ++++++++++++++++++++++++++++++++++--
 cmake/ci.cmake | 20 +++++++++---------
 2 files changed, 48 insertions(+), 12 deletions(-)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index d6b6540779..7f932063a2 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -28,7 +28,7 @@ jobs:
     steps:
     - uses: actions/checkout@v1
-    - name: install_gcc
+    - name: install_clang
       run: |
         sudo apt update
         sudo apt install clang-10
@@ -48,7 +48,7 @@ jobs:
     steps:
     - uses: actions/checkout@v1
-    - name: install_gcc
+    - name: install_clang
       run: |
         sudo apt update
         sudo apt install clang-10
@@ -62,3 +62,39 @@ jobs:
       run: cd build ; ctest -j 10 --output-on-failure
+
+  ci_test_gcc:
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v1
+    - name: install_gcc
+      run: |
+        sudo apt update
+        sudo apt install gcc-10 g++-10
+      shell: bash
+    - name: cmake
+      run: cmake -S . -B build -DJSON_CI=On
+      env:
+        CC: gcc-10
+        CXX: g++-10
+    - name: build
+      run: cmake --build build --target ci_test_gcc --parallel 10
+
+  ci_test_clang:
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v1
+    - name: install_clang
+      run: |
+        sudo apt update
+        sudo apt install clang-10
+      shell: bash
+    - name: cmake
+      run: cmake -S . -B build -DJSON_CI=On
+      env:
+        CC: clang-10
+        CXX: clang++-10
+    - name: build
+      run: cmake --build build --target ci_test_clang --parallel 10
diff --git a/cmake/ci.cmake b/cmake/ci.cmake
index 9cec0ed94e..0c05964b7c 100644
--- a/cmake/ci.cmake
+++ b/cmake/ci.cmake
@@ -12,40 +12,40 @@ include(FindPython3)
 find_package(Python3 COMPONENTS Interpreter)

-find_program(CLANG_TIDY_TOOL NAMES clang-tidy REQUIRED)
+find_program(CLANG_TIDY_TOOL NAMES clang-tidy)
 execute_process(COMMAND ${CLANG_TIDY_TOOL} --version OUTPUT_VARIABLE CLANG_TIDY_TOOL_VERSION ERROR_VARIABLE CLANG_TIDY_TOOL_VERSION)
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" CLANG_TIDY_TOOL_VERSION "${CLANG_TIDY_TOOL_VERSION}")
 message(STATUS "🔖 Clang-Tidy ${CLANG_TIDY_TOOL_VERSION} (${CLANG_TIDY_TOOL})")

-find_program(CLANG_TOOL NAMES clang++-HEAD clang++-11 clang++ REQUIRED)
+find_program(CLANG_TOOL NAMES clang++-HEAD clang++-11 clang++)
 execute_process(COMMAND ${CLANG_TOOL} --version OUTPUT_VARIABLE CLANG_TOOL_VERSION ERROR_VARIABLE CLANG_TOOL_VERSION)
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" CLANG_TOOL_VERSION "${CLANG_TOOL_VERSION}")
 message(STATUS "🔖 Clang ${CLANG_TOOL_VERSION} (${CLANG_TOOL})")

-find_program(CPPCHECK_TOOL NAMES cppcheck REQUIRED)
+find_program(CPPCHECK_TOOL NAMES cppcheck)
 execute_process(COMMAND ${CPPCHECK_TOOL} --version OUTPUT_VARIABLE CPPCHECK_TOOL_VERSION ERROR_VARIABLE CPPCHECK_TOOL_VERSION)
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" CPPCHECK_TOOL_VERSION "${CPPCHECK_TOOL_VERSION}")
 message(STATUS "🔖 Cppcheck ${CPPCHECK_TOOL_VERSION} (${CPPCHECK_TOOL})")

-find_program(GCC_TOOL NAMES g++-HEAD g++-11 g++ REQUIRED)
+find_program(GCC_TOOL NAMES g++-HEAD g++-11 g++)
 execute_process(COMMAND ${GCC_TOOL} --version OUTPUT_VARIABLE GCC_TOOL_VERSION ERROR_VARIABLE GCC_TOOL_VERSION)
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" GCC_TOOL_VERSION "${GCC_TOOL_VERSION}")
 message(STATUS "🔖 GCC ${GCC_TOOL_VERSION} (${GCC_TOOL})")

-find_program(INFER_TOOL NAMES infer REQUIRED)
+find_program(INFER_TOOL NAMES infer)
 execute_process(COMMAND ${INFER_TOOL} --version OUTPUT_VARIABLE INFER_TOOL_VERSION ERROR_VARIABLE INFER_TOOL_VERSION)
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" INFER_TOOL_VERSION "${INFER_TOOL_VERSION}")
 message(STATUS "🔖 Infer ${INFER_TOOL_VERSION} (${INFER_TOOL})")

-find_program(IWYU_TOOL NAMES include-what-you-use iwyu REQUIRED)
+find_program(IWYU_TOOL NAMES include-what-you-use iwyu)
 execute_process(COMMAND ${IWYU_TOOL} --version OUTPUT_VARIABLE IWYU_TOOL_VERSION ERROR_VARIABLE IWYU_TOOL_VERSION)
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" IWYU_TOOL_VERSION "${IWYU_TOOL_VERSION}")
 message(STATUS "🔖 include-what-you-use ${IWYU_TOOL_VERSION} (${IWYU_TOOL})")

-find_program(OCLINT_TOOL NAMES oclint-json-compilation-database REQUIRED)
-find_program(PLOG_CONVERTER_TOOL NAMES plog-converter REQUIRED)
-find_program(PVS_STUDIO_ANALYZER_TOOL NAMES pvs-studio-analyzer REQUIRED)
-find_program(SCAN_BUILD_TOOL NAMES scan-build REQUIRED)
+find_program(OCLINT_TOOL NAMES oclint-json-compilation-database)
+find_program(PLOG_CONVERTER_TOOL NAMES plog-converter)
+find_program(PVS_STUDIO_ANALYZER_TOOL NAMES pvs-studio-analyzer)
+find_program(SCAN_BUILD_TOOL NAMES scan-build)

 # the individual source files
 file(GLOB_RECURSE SRC_FILES ${PROJECT_SOURCE_DIR}/include/nlohmann/*.hpp)
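Dropping REQUIRED from these find_program calls deserves a gloss, since the commit message does not explain it: find_program only learned the REQUIRED keyword in CMake 3.18, and without it a missing tool merely leaves the variable set to <VAR>-NOTFOUND, so configuring with JSON_CI=ON can still succeed on runners that lack some of the analyzers; only the targets that actually use a missing tool will fail. If a hard failure were wanted on older CMake, a version-portable sketch (an assumption, not part of the patch) would be:

    find_program(GCC_TOOL NAMES g++-HEAD g++-11 g++)
    if(NOT GCC_TOOL)
        # portable stand-in for find_program(... REQUIRED), which needs CMake >= 3.18
        message(FATAL_ERROR "no suitable g++ found")
    endif()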
From bd859960f4795f65ddfed1addc4e8fb1489be370 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 17 Jan 2021 14:41:06 +0100
Subject: [PATCH 018/113] :construction_worker: use GitHub Actions

---
 .github/workflows/ubuntu.yml | 4 ++--
 cmake/ci.cmake | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 7f932063a2..11c5e75058 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -71,7 +71,7 @@ jobs:
     - name: install_gcc
       run: |
         sudo apt update
-        sudo apt install gcc-10 g++-10
+        sudo apt install gcc-10 g++-10 ninja-build
       shell: bash
     - name: cmake
       run: cmake -S . -B build -DJSON_CI=On
@@ -89,7 +89,7 @@ jobs:
     - name: install_clang
       run: |
         sudo apt update
-        sudo apt install clang-10
+        sudo apt install clang-10 ninja-build
       shell: bash
     - name: cmake
       run: cmake -S . -B build -DJSON_CI=On
diff --git a/cmake/ci.cmake b/cmake/ci.cmake
index 0c05964b7c..2da75f434a 100644
--- a/cmake/ci.cmake
+++ b/cmake/ci.cmake
@@ -17,7 +17,7 @@ execute_process(COMMAND ${CLANG_TIDY_TOOL} --version OUTPUT_VARIABLE CLANG_TIDY_
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" CLANG_TIDY_TOOL_VERSION "${CLANG_TIDY_TOOL_VERSION}")
 message(STATUS "🔖 Clang-Tidy ${CLANG_TIDY_TOOL_VERSION} (${CLANG_TIDY_TOOL})")

-find_program(CLANG_TOOL NAMES clang++-HEAD clang++-11 clang++)
+find_program(CLANG_TOOL NAMES clang++-HEAD clang++-11 clang++-10 clang++)
 execute_process(COMMAND ${CLANG_TOOL} --version OUTPUT_VARIABLE CLANG_TOOL_VERSION ERROR_VARIABLE CLANG_TOOL_VERSION)
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" CLANG_TOOL_VERSION "${CLANG_TOOL_VERSION}")
 message(STATUS "🔖 Clang ${CLANG_TOOL_VERSION} (${CLANG_TOOL})")
@@ -27,7 +27,7 @@ execute_process(COMMAND ${CPPCHECK_TOOL} --version OUTPUT_VARIABLE CPPCHECK_TOOL
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" CPPCHECK_TOOL_VERSION "${CPPCHECK_TOOL_VERSION}")
 message(STATUS "🔖 Cppcheck ${CPPCHECK_TOOL_VERSION} (${CPPCHECK_TOOL})")

-find_program(GCC_TOOL NAMES g++-HEAD g++-11 g++)
+find_program(GCC_TOOL NAMES g++-HEAD g++-11 g++-10 g++)
 execute_process(COMMAND ${GCC_TOOL} --version OUTPUT_VARIABLE GCC_TOOL_VERSION ERROR_VARIABLE GCC_TOOL_VERSION)
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" GCC_TOOL_VERSION "${GCC_TOOL_VERSION}")
 message(STATUS "🔖 GCC ${GCC_TOOL_VERSION} (${GCC_TOOL})")

From 755f6949956a0beaa2b8b52fcfaa7574d67a8501 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 17 Jan 2021 14:46:20 +0100
Subject: [PATCH 019/113] :alembic: try to install Clang 11

---
 .github/workflows/ubuntu.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 11c5e75058..2d25675031 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -89,7 +89,7 @@ jobs:
     - name: install_clang
       run: |
         sudo apt update
-        sudo apt install clang-10 ninja-build
+        sudo apt install clang-11 ninja-build
       shell: bash
     - name: cmake
       run: cmake -S . -B build -DJSON_CI=On

From ce9f6a1133f53e2f16f69fd189e54fe7e4a0ebf3 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 17 Jan 2021 14:59:46 +0100
Subject: [PATCH 020/113] :alembic: try to install GCC 11

---
 .github/workflows/ubuntu.yml | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 2d25675031..5e2357b65e 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -70,8 +70,9 @@ jobs:
     - uses: actions/checkout@v1
     - name: install_gcc
       run: |
+        sudo add-apt-repository ppa:ubuntu-toolchain-r/test
         sudo apt update
-        sudo apt install gcc-10 g++-10 ninja-build
+        sudo apt install gcc-11 g++-11 ninja-build
       shell: bash
     - name: cmake
       run: cmake -S . -B build -DJSON_CI=On
@@ -89,7 +89,7 @@ jobs:
     - name: install_clang
       run: |
         sudo apt update
-        sudo apt install clang-11 ninja-build
+        sudo apt install clang-10 ninja-build
       shell: bash
     - name: cmake
       run: cmake -S . -B build -DJSON_CI=On

From bac98447ec3e9392707cce768cfe810c4660736f Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 17 Jan 2021 15:12:49 +0100
Subject: [PATCH 021/113] :alembic: try to install Clang 11

---
 .github/workflows/ubuntu.yml | 17 +++++++++++------
 cmake/ci.cmake | 2 +-
 2 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 5e2357b65e..6223511a1f 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -70,9 +70,8 @@ jobs:
     - uses: actions/checkout@v1
     - name: install_gcc
       run: |
-        sudo add-apt-repository ppa:ubuntu-toolchain-r/test
         sudo apt update
-        sudo apt install gcc-11 g++-11 ninja-build
+        sudo apt install gcc-10 g++-10 ninja-build
       shell: bash
     - name: cmake
       run: cmake -S . -B build -DJSON_CI=On
@@ -87,15 +86,21 @@ jobs:
     steps:
     - uses: actions/checkout@v1
-    - name: install_clang
+    - name: install_ninja
       run: |
         sudo apt update
-        sudo apt install clang-10 ninja-build
+        sudo apt install ninja-build
       shell: bash
+    - name: install_clang
+      run: |
+        wget https://apt.llvm.org/llvm.sh
+        chmod +x llvm.sh
+        sudo ./llvm.sh 11
+      shell: bash
     - name: cmake
       run: cmake -S . -B build -DJSON_CI=On
       env:
-        CC: clang-10
-        CXX: clang++-10
+        CC: clang-11
+        CXX: clang++-11
     - name: build
       run: cmake --build build --target ci_test_clang --parallel 10
diff --git a/cmake/ci.cmake b/cmake/ci.cmake
index 2da75f434a..5259c64e6f 100644
--- a/cmake/ci.cmake
+++ b/cmake/ci.cmake
@@ -17,7 +17,7 @@ execute_process(COMMAND ${CLANG_TIDY_TOOL} --version OUTPUT_VARIABLE CLANG_TIDY_
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" CLANG_TIDY_TOOL_VERSION "${CLANG_TIDY_TOOL_VERSION}")
 message(STATUS "🔖 Clang-Tidy ${CLANG_TIDY_TOOL_VERSION} (${CLANG_TIDY_TOOL})")

-find_program(CLANG_TOOL NAMES clang++-HEAD clang++-11 clang++-10 clang++)
+find_program(CLANG_TOOL NAMES clang++-HEAD clang++-11 clang++)
 execute_process(COMMAND ${CLANG_TOOL} --version OUTPUT_VARIABLE CLANG_TOOL_VERSION ERROR_VARIABLE CLANG_TOOL_VERSION)
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" CLANG_TOOL_VERSION "${CLANG_TOOL_VERSION}")
 message(STATUS "🔖 Clang ${CLANG_TOOL_VERSION} (${CLANG_TOOL})")

From a749b1cca500576bab7130cdce56a92475656ab5 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 17 Jan 2021 16:55:06 +0100
Subject: [PATCH 022/113] :alembic: try to install GCC 11

---
 .github/workflows/ubuntu.yml | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 6223511a1f..2d18966900 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -68,16 +68,19 @@ jobs:
     steps:
     - uses: actions/checkout@v1
+    - name: install_ninja
+      run: |
+        sudo apt update
+        sudo apt install ninja-build
+      shell: bash
    - name: install_gcc
      run: |
+        sudo add-apt-repository ppa:ubuntu-toolchain-r/test
         sudo apt update
-        sudo apt install gcc-10 g++-10 ninja-build
+        sudo apt install gcc-11
      shell: bash
    - name: cmake
      run: cmake -S . -B build -DJSON_CI=On
-      env:
-        CC: clang-11
-        CXX: clang++-11
    - name: build
      run: cmake --build build --target ci_test_clang --parallel 10

From b7befb2faae22b84cd8e2e279e2bfb40b2dfad1b Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 17 Jan 2021 17:11:39 +0100
Subject: [PATCH 023/113] :alembic: add clang analyze target

---
 .github/workflows/ubuntu.yml | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 2d18966900..85438b1bd8 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -63,7 +63,7 @@ jobs:
     - name: test
       run: cd build ; ctest -j 10 --output-on-failure

-  ci_test_gcc:
+  ci_test_clang:
     runs-on: ubuntu-latest

     steps:
     - uses: actions/checkout@v1
@@ -73,18 +73,18 @@ jobs:
         sudo apt update
         sudo apt install ninja-build
       shell: bash
-    - name: install_gcc
+    - name: install_clang
       run: |
-        sudo add-apt-repository ppa:ubuntu-toolchain-r/test
-        sudo apt update
-        sudo apt install gcc-11
+        wget https://apt.llvm.org/llvm.sh
+        chmod +x llvm.sh
+        sudo ./llvm.sh 11
       shell: bash
     - name: cmake
       run: cmake -S . -B build -DJSON_CI=On
     - name: build
-      run: cmake --build build --target ci_test_gcc --parallel 10
+      run: cmake --build build --target ci_test_clang

-  ci_test_clang:
+  ci_clang_analyze:
     runs-on: ubuntu-latest

     steps:
     - uses: actions/checkout@v1
     - name: install_ninja
       run: |
         sudo apt update
         sudo apt install ninja-build
       shell: bash
     - name: install_clang
       run: |
         wget https://apt.llvm.org/llvm.sh
         chmod +x llvm.sh
         sudo ./llvm.sh 11
       shell: bash
     - name: cmake
       run: cmake -S . -B build -DJSON_CI=On
     - name: build
       run: cmake --build build --target ci_clang_analyze

From 06bffd263e2374670cf0643566d76ea7c36b0959 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 17 Jan 2021 20:33:36 +0100
Subject: [PATCH 024/113] :alembic: add clang analyze target

---
 .github/workflows/ubuntu.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 85438b1bd8..b12f5cb259 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -99,6 +99,7 @@ jobs:
         wget https://apt.llvm.org/llvm.sh
         chmod +x llvm.sh
         sudo ./llvm.sh 11
+        sudo apt-get install clang-tools-11
       shell: bash
     - name: cmake
       run: cmake -S . -B build -DJSON_CI=On
     - name: build
       run: cmake --build build --target ci_clang_analyze

From 08d51c0cf6691936228d189963394eab3ccc4cc6 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 17 Jan 2021 20:36:53 +0100
Subject: [PATCH 025/113] :alembic: add clang analyze target

---
 cmake/ci.cmake | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cmake/ci.cmake b/cmake/ci.cmake
index 5259c64e6f..45eb8a91f1 100644
--- a/cmake/ci.cmake
+++ b/cmake/ci.cmake
@@ -12,7 +12,7 @@ include(FindPython3)
 find_package(Python3 COMPONENTS Interpreter)

-find_program(CLANG_TIDY_TOOL NAMES clang-tidy)
+find_program(CLANG_TIDY_TOOL NAMES clang-tidy-11 clang-tidy)
 execute_process(COMMAND ${CLANG_TIDY_TOOL} --version OUTPUT_VARIABLE CLANG_TIDY_TOOL_VERSION ERROR_VARIABLE CLANG_TIDY_TOOL_VERSION)
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" CLANG_TIDY_TOOL_VERSION "${CLANG_TIDY_TOOL_VERSION}")
 message(STATUS "🔖 Clang-Tidy ${CLANG_TIDY_TOOL_VERSION} (${CLANG_TIDY_TOOL})")
@@ -45,7 +45,7 @@ message(STATUS "🔖 include-what-you-use ${IWYU_TOOL_VERSION} (${IWYU_TOOL})")
 find_program(OCLINT_TOOL NAMES oclint-json-compilation-database)
 find_program(PLOG_CONVERTER_TOOL NAMES plog-converter)
 find_program(PVS_STUDIO_ANALYZER_TOOL NAMES pvs-studio-analyzer)
-find_program(SCAN_BUILD_TOOL NAMES scan-build)
+find_program(SCAN_BUILD_TOOL NAMES scan-build-11 scan-build)

 # the individual source files
 file(GLOB_RECURSE SRC_FILES ${PROJECT_SOURCE_DIR}/include/nlohmann/*.hpp)
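The NAMES lists in the patch above are ordered deliberately: find_program tries the candidate names in order and caches the first one it finds, so listing the versioned binaries first pins the analysis to the LLVM 11 toolchain installed by the workflow, while the plain clang-tidy/scan-build names remain as fallbacks. A reduced sketch of the lookup:

    # tried left to right; the cached result is the first name found on PATH
    find_program(CLANG_TIDY_TOOL NAMES clang-tidy-11 clang-tidy)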
From d2ef9a4416a49670400bed94daf525b75c1af2e7 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sat, 23 Jan 2021 13:56:55 +0100
Subject: [PATCH 026/113] :fire: remove Google Benchmark

---
 benchmarks/thirdparty/benchmark/AUTHORS | 46 -
 benchmarks/thirdparty/benchmark/BUILD.bazel | 42 -
 .../thirdparty/benchmark/CMakeLists.txt | 251 ---
 .../thirdparty/benchmark/CONTRIBUTING.md | 58 -
 benchmarks/thirdparty/benchmark/CONTRIBUTORS | 65 -
 benchmarks/thirdparty/benchmark/LICENSE | 202 ---
 benchmarks/thirdparty/benchmark/README.md | 950 -----------
 benchmarks/thirdparty/benchmark/WORKSPACE | 7 -
 benchmarks/thirdparty/benchmark/appveyor.yml | 56 -
 .../benchmark/cmake/AddCXXCompilerFlag.cmake | 74 -
 .../benchmark/cmake/CXXFeatureCheck.cmake | 64 -
 .../benchmark/cmake/Config.cmake.in | 1 -
 .../benchmark/cmake/GetGitVersion.cmake | 54 -
 .../benchmark/cmake/HandleGTest.cmake | 113 --
 .../benchmark/cmake/Modules/FindLLVMAr.cmake | 16 -
 .../benchmark/cmake/Modules/FindLLVMNm.cmake | 16 -
 .../cmake/Modules/FindLLVMRanLib.cmake | 15 -
 .../benchmark/cmake/benchmark.pc.in | 11 -
 .../benchmark/cmake/gnu_posix_regex.cpp | 12 -
 .../benchmark/cmake/llvm-toolchain.cmake | 8 -
 .../benchmark/cmake/posix_regex.cpp | 14 -
 .../benchmark/cmake/split_list.cmake | 3 -
 .../thirdparty/benchmark/cmake/std_regex.cpp | 10 -
 .../benchmark/cmake/steady_clock.cpp | 7 -
 .../cmake/thread_safety_attributes.cpp | 4 -
 .../benchmark/docs/AssemblyTests.md | 147 --
 benchmarks/thirdparty/benchmark/docs/tools.md | 242 ---
 .../benchmark/include/benchmark/benchmark.h | 1456 -----------------
 benchmarks/thirdparty/benchmark/mingw.py | 320 ----
 benchmarks/thirdparty/benchmark/releasing.md | 16 -
 .../thirdparty/benchmark/src/CMakeLists.txt | 105 --
 .../thirdparty/benchmark/src/arraysize.h | 33 -
 .../thirdparty/benchmark/src/benchmark.cc | 630 -------
 .../benchmark/src/benchmark_api_internal.h | 47 -
 .../benchmark/src/benchmark_main.cc | 17 -
 .../benchmark/src/benchmark_register.cc | 461 ------
 .../benchmark/src/benchmark_register.h | 33 -
 benchmarks/thirdparty/benchmark/src/check.h | 79 -
 .../thirdparty/benchmark/src/colorprint.cc | 188 ---
 .../thirdparty/benchmark/src/colorprint.h | 33 -
 .../benchmark/src/commandlineflags.cc | 218 ---
 .../benchmark/src/commandlineflags.h | 79 -
 .../thirdparty/benchmark/src/complexity.cc | 220 ---
 .../thirdparty/benchmark/src/complexity.h | 55 -
 .../benchmark/src/console_reporter.cc | 182 ---
 .../thirdparty/benchmark/src/counter.cc | 68 -
 benchmarks/thirdparty/benchmark/src/counter.h | 26 -
 .../thirdparty/benchmark/src/csv_reporter.cc | 149 --
 .../thirdparty/benchmark/src/cycleclock.h | 177 --
 .../benchmark/src/internal_macros.h | 89 -
 .../thirdparty/benchmark/src/json_reporter.cc | 205 ---
 benchmarks/thirdparty/benchmark/src/log.h | 73 -
 benchmarks/thirdparty/benchmark/src/mutex.h | 155 --
 benchmarks/thirdparty/benchmark/src/re.h | 152 --
 .../thirdparty/benchmark/src/reporter.cc | 87 -
 benchmarks/thirdparty/benchmark/src/sleep.cc | 51 -
 benchmarks/thirdparty/benchmark/src/sleep.h | 15 -
 .../thirdparty/benchmark/src/statistics.cc | 178 --
 .../thirdparty/benchmark/src/statistics.h | 37 -
 .../thirdparty/benchmark/src/string_util.cc | 172 --
 .../thirdparty/benchmark/src/string_util.h | 40 -
 .../thirdparty/benchmark/src/sysinfo.cc | 587 -------
 .../thirdparty/benchmark/src/thread_manager.h | 66 -
 .../thirdparty/benchmark/src/thread_timer.h | 69 -
 benchmarks/thirdparty/benchmark/src/timers.cc | 217 ---
 benchmarks/thirdparty/benchmark/src/timers.h | 48 -
 .../thirdparty/benchmark/tools/compare.py | 316 ----
 .../benchmark/tools/compare_bench.py | 67 -
 .../tools/gbench/Inputs/test1_run1.json | 102 --
 .../tools/gbench/Inputs/test1_run2.json | 102 --
 .../tools/gbench/Inputs/test2_run.json | 81 -
 .../benchmark/tools/gbench/__init__.py | 8 -
 .../benchmark/tools/gbench/report.py | 208 ---
 .../thirdparty/benchmark/tools/gbench/util.py | 159 --
 .../thirdparty/benchmark/tools/strip_asm.py | 151 --
 75 files changed, 10515 deletions(-)
 delete mode 100755 benchmarks/thirdparty/benchmark/AUTHORS
 delete mode 100755 benchmarks/thirdparty/benchmark/BUILD.bazel
 delete mode 100755 benchmarks/thirdparty/benchmark/CMakeLists.txt
 delete mode 100755 benchmarks/thirdparty/benchmark/CONTRIBUTING.md
 delete mode 100755 benchmarks/thirdparty/benchmark/CONTRIBUTORS
 delete mode 100755 benchmarks/thirdparty/benchmark/LICENSE
 delete mode 100755 benchmarks/thirdparty/benchmark/README.md
 delete mode 100755 benchmarks/thirdparty/benchmark/WORKSPACE
 delete mode 100755 benchmarks/thirdparty/benchmark/appveyor.yml
 delete mode 100755 benchmarks/thirdparty/benchmark/cmake/AddCXXCompilerFlag.cmake
 delete mode 100755 benchmarks/thirdparty/benchmark/cmake/CXXFeatureCheck.cmake
 delete mode 100755 benchmarks/thirdparty/benchmark/cmake/Config.cmake.in
 delete mode 100755 benchmarks/thirdparty/benchmark/cmake/GetGitVersion.cmake
 delete mode 100755 benchmarks/thirdparty/benchmark/cmake/HandleGTest.cmake
 delete mode 100755 benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMAr.cmake
 delete mode 100755 benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMNm.cmake
 delete mode 100755 benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMRanLib.cmake
 delete mode 100755 benchmarks/thirdparty/benchmark/cmake/benchmark.pc.in
 delete mode 100755 benchmarks/thirdparty/benchmark/cmake/gnu_posix_regex.cpp
 delete mode 100755 benchmarks/thirdparty/benchmark/cmake/llvm-toolchain.cmake
 delete mode 100755 benchmarks/thirdparty/benchmark/cmake/posix_regex.cpp
 delete mode 100755 benchmarks/thirdparty/benchmark/cmake/split_list.cmake
 delete mode 100755 benchmarks/thirdparty/benchmark/cmake/std_regex.cpp
 delete mode 100755 benchmarks/thirdparty/benchmark/cmake/steady_clock.cpp
 delete mode 100755 benchmarks/thirdparty/benchmark/cmake/thread_safety_attributes.cpp
 delete mode 100755 benchmarks/thirdparty/benchmark/docs/AssemblyTests.md
 delete mode 100755 benchmarks/thirdparty/benchmark/docs/tools.md
 delete mode 100755 benchmarks/thirdparty/benchmark/include/benchmark/benchmark.h
 delete mode 100755 benchmarks/thirdparty/benchmark/mingw.py
 delete mode 100755 benchmarks/thirdparty/benchmark/releasing.md
 delete mode 100755 benchmarks/thirdparty/benchmark/src/CMakeLists.txt
 delete mode 100755 benchmarks/thirdparty/benchmark/src/arraysize.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/benchmark.cc
 delete mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_api_internal.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_main.cc
 delete mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_register.cc
 delete mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_register.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/check.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/colorprint.cc
 delete mode 100755 benchmarks/thirdparty/benchmark/src/colorprint.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/commandlineflags.cc
 delete mode 100755 benchmarks/thirdparty/benchmark/src/commandlineflags.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/complexity.cc
 delete mode 100755 benchmarks/thirdparty/benchmark/src/complexity.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/console_reporter.cc
 delete mode 100755 benchmarks/thirdparty/benchmark/src/counter.cc
 delete mode 100755 benchmarks/thirdparty/benchmark/src/counter.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/csv_reporter.cc
 delete mode 100755 benchmarks/thirdparty/benchmark/src/cycleclock.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/internal_macros.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/json_reporter.cc
 delete mode 100755 benchmarks/thirdparty/benchmark/src/log.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/mutex.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/re.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/reporter.cc
 delete mode 100755 benchmarks/thirdparty/benchmark/src/sleep.cc
 delete mode 100755 benchmarks/thirdparty/benchmark/src/sleep.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/statistics.cc
 delete mode 100755 benchmarks/thirdparty/benchmark/src/statistics.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/string_util.cc
 delete mode 100755 benchmarks/thirdparty/benchmark/src/string_util.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/sysinfo.cc
 delete mode 100755 benchmarks/thirdparty/benchmark/src/thread_manager.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/thread_timer.h
 delete mode 100755 benchmarks/thirdparty/benchmark/src/timers.cc
 delete mode 100755 benchmarks/thirdparty/benchmark/src/timers.h
 delete mode 100755 benchmarks/thirdparty/benchmark/tools/compare.py
 delete mode 100755 benchmarks/thirdparty/benchmark/tools/compare_bench.py
 delete mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run1.json
 delete mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run2.json
 delete mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test2_run.json
 delete mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/__init__.py
 delete mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/report.py
 delete mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/util.py
 delete mode 100755 benchmarks/thirdparty/benchmark/tools/strip_asm.py

diff --git a/benchmarks/thirdparty/benchmark/AUTHORS b/benchmarks/thirdparty/benchmark/AUTHORS
deleted file mode 100755
index f8219036d2..0000000000
--- a/benchmarks/thirdparty/benchmark/AUTHORS
+++ /dev/null
@@ -1,46 +0,0 @@
-# This is the official list of benchmark authors for copyright purposes.
-# This file is distinct from the CONTRIBUTORS files.
-# See the latter for an explanation.
-#
-# Names should be added to this file as:
-# Name or Organization
-# The email address is not required for organizations.
-#
-# Please keep the list sorted.
-
-Albert Pretorius
-Arne Beer
-Carto
-Christopher Seymour
-David Coeurjolly
-Deniz Evrenci
-Dirac Research
-Dominik Czarnota
-Eric Fiselier
-Eugene Zhuk
-Evgeny Safronov
-Felix Homann
-Google Inc.
-International Business Machines Corporation
-Ismael Jimenez Martinez
-Jern-Kuan Leong
-JianXiong Zhou
-Joao Paulo Magalhaes
-Jussi Knuuttila
-Kaito Udagawa
-Kishan Kumar
-Lei Xu
-Matt Clarkson
-Maxim Vafin
-MongoDB Inc.
-Nick Hutchinson
-Oleksandr Sochka
-Paul Redmond
-Radoslav Yovchev
-Roman Lebedev
-Shuo Chen
-Steinar H. Gunderson
-Stripe, Inc.
-Yixuan Qiu
-Yusuke Suzuki
-Zbigniew Skowron
diff --git a/benchmarks/thirdparty/benchmark/BUILD.bazel b/benchmarks/thirdparty/benchmark/BUILD.bazel
deleted file mode 100755
index 6ee69f2907..0000000000
--- a/benchmarks/thirdparty/benchmark/BUILD.bazel
+++ /dev/null
@@ -1,42 +0,0 @@
-licenses(["notice"])
-
-config_setting(
-    name = "windows",
-    values = {
-        "cpu": "x64_windows",
-    },
-    visibility = [":__subpackages__"],
-)
-
-cc_library(
-    name = "benchmark",
-    srcs = glob(
-        [
-            "src/*.cc",
-            "src/*.h",
-        ],
-        exclude = ["src/benchmark_main.cc"],
-    ),
-    hdrs = ["include/benchmark/benchmark.h"],
-    linkopts = select({
-        ":windows": ["-DEFAULTLIB:shlwapi.lib"],
-        "//conditions:default": ["-pthread"],
-    }),
-    strip_include_prefix = "include",
-    visibility = ["//visibility:public"],
-)
-
-cc_library(
-    name = "benchmark_main",
-    srcs = ["src/benchmark_main.cc"],
-    hdrs = ["include/benchmark/benchmark.h"],
-    strip_include_prefix = "include",
-    visibility = ["//visibility:public"],
-    deps = [":benchmark"],
-)
-
-cc_library(
-    name = "benchmark_internal_headers",
-    hdrs = glob(["src/*.h"]),
-    visibility = ["//test:__pkg__"],
-)
diff --git a/benchmarks/thirdparty/benchmark/CMakeLists.txt b/benchmarks/thirdparty/benchmark/CMakeLists.txt
deleted file mode 100755
index b1c1d3d5a9..0000000000
--- a/benchmarks/thirdparty/benchmark/CMakeLists.txt
+++ /dev/null
@@ -1,251 +0,0 @@
-cmake_minimum_required (VERSION 2.8.12)
-
-project (benchmark)
-
-foreach(p
-    CMP0054 # CMake 3.1
-    CMP0056 # export EXE_LINKER_FLAGS to try_run
-    CMP0057 # Support no if() IN_LIST operator
-    )
-    if(POLICY ${p})
-        cmake_policy(SET ${p} NEW)
-    endif()
-endforeach()
-
-option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON)
-option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON)
-option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF)
-option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF)
-option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF)
-option(BENCHMARK_ENABLE_INSTALL "Enable installation of benchmark.
(Projects embedding benchmark may want to turn this OFF.)" ON) - -# Allow unmet dependencies to be met using CMake's ExternalProject mechanics, which -# may require downloading the source code. -option(BENCHMARK_DOWNLOAD_DEPENDENCIES "Allow the downloading and in-tree building of unmet dependencies" OFF) - -# This option can be used to disable building and running unit tests which depend on gtest -# in cases where it is not possible to build or find a valid version of gtest. -option(BENCHMARK_ENABLE_GTEST_TESTS "Enable building the unit tests which depend on gtest" ON) - -set(ENABLE_ASSEMBLY_TESTS_DEFAULT OFF) -function(should_enable_assembly_tests) - if(CMAKE_BUILD_TYPE) - string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER) - if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage") - # FIXME: The --coverage flag needs to be removed when building assembly - # tests for this to work. - return() - endif() - endif() - if (MSVC) - return() - elseif(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64") - return() - elseif(NOT CMAKE_SIZEOF_VOID_P EQUAL 8) - # FIXME: Make these work on 32 bit builds - return() - elseif(BENCHMARK_BUILD_32_BITS) - # FIXME: Make these work on 32 bit builds - return() - endif() - find_program(LLVM_FILECHECK_EXE FileCheck) - if (LLVM_FILECHECK_EXE) - set(LLVM_FILECHECK_EXE "${LLVM_FILECHECK_EXE}" CACHE PATH "llvm filecheck" FORCE) - message(STATUS "LLVM FileCheck Found: ${LLVM_FILECHECK_EXE}") - else() - message(STATUS "Failed to find LLVM FileCheck") - return() - endif() - set(ENABLE_ASSEMBLY_TESTS_DEFAULT ON PARENT_SCOPE) -endfunction() -should_enable_assembly_tests() - -# This option disables the building and running of the assembly verification tests -option(BENCHMARK_ENABLE_ASSEMBLY_TESTS "Enable building and running the assembly tests" - ${ENABLE_ASSEMBLY_TESTS_DEFAULT}) - -# Make sure we can import out CMake functions -list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules") -list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") - - -# Read the git tags to determine the project version -include(GetGitVersion) -get_git_version(GIT_VERSION) - -# Tell the user what versions we are using -string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]+" VERSION ${GIT_VERSION}) -message("-- Version: ${VERSION}") - -# The version of the libraries -set(GENERIC_LIB_VERSION ${VERSION}) -string(SUBSTRING ${VERSION} 0 1 GENERIC_LIB_SOVERSION) - -# Import our CMake modules -include(CheckCXXCompilerFlag) -include(AddCXXCompilerFlag) -include(CXXFeatureCheck) - -if (BENCHMARK_BUILD_32_BITS) - add_required_cxx_compiler_flag(-m32) -endif() - -if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") - # Turn compiler warnings up to 11 - string(REGEX REPLACE "[-/]W[1-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4") - add_definitions(-D_CRT_SECURE_NO_WARNINGS) - - if (NOT BENCHMARK_ENABLE_EXCEPTIONS) - add_cxx_compiler_flag(-EHs-) - add_cxx_compiler_flag(-EHa-) - endif() - # Link time optimisation - if (BENCHMARK_ENABLE_LTO) - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GL") - set(CMAKE_STATIC_LINKER_FLAGS_RELEASE "${CMAKE_STATIC_LINKER_FLAGS_RELEASE} /LTCG") - set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG") - set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG") - - set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /GL") - string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO 
"${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO}") - set(CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO} /LTCG") - string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO}") - set(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO} /LTCG") - string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO}") - set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} /LTCG") - - set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /GL") - set(CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL "${CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL} /LTCG") - set(CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL "${CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL} /LTCG") - set(CMAKE_EXE_LINKER_FLAGS_MINSIZEREL "${CMAKE_EXE_LINKER_FLAGS_MINSIZEREL} /LTCG") - endif() -else() - # Try and enable C++11. Don't use C++14 because it doesn't work in some - # configurations. - add_cxx_compiler_flag(-std=c++11) - if (NOT HAVE_CXX_FLAG_STD_CXX11) - add_cxx_compiler_flag(-std=c++0x) - endif() - - # Turn compiler warnings up to 11 - add_cxx_compiler_flag(-Wall) - - add_cxx_compiler_flag(-Wextra) - add_cxx_compiler_flag(-Wshadow) - add_cxx_compiler_flag(-Werror RELEASE) - add_cxx_compiler_flag(-Werror RELWITHDEBINFO) - add_cxx_compiler_flag(-Werror MINSIZEREL) - add_cxx_compiler_flag(-pedantic) - add_cxx_compiler_flag(-pedantic-errors) - add_cxx_compiler_flag(-Wshorten-64-to-32) - add_cxx_compiler_flag(-Wfloat-equal) - add_cxx_compiler_flag(-fstrict-aliasing) - if (NOT BENCHMARK_ENABLE_EXCEPTIONS) - add_cxx_compiler_flag(-fno-exceptions) - endif() - - if (HAVE_CXX_FLAG_FSTRICT_ALIASING) - if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "Intel") #ICC17u2: Many false positives for Wstrict-aliasing - add_cxx_compiler_flag(-Wstrict-aliasing) - endif() - endif() - # ICC17u2: overloaded virtual function "benchmark::Fixture::SetUp" is only partially overridden - # (because of deprecated overload) - add_cxx_compiler_flag(-wd654) - add_cxx_compiler_flag(-Wthread-safety) - if (HAVE_CXX_FLAG_WTHREAD_SAFETY) - cxx_feature_check(THREAD_SAFETY_ATTRIBUTES) - endif() - - # On most UNIX like platforms g++ and clang++ define _GNU_SOURCE as a - # predefined macro, which turns on all of the wonderful libc extensions. - # However g++ doesn't do this in Cygwin so we have to define it ourselfs - # since we depend on GNU/POSIX/BSD extensions. - if (CYGWIN) - add_definitions(-D_GNU_SOURCE=1) - endif() - - # Link time optimisation - if (BENCHMARK_ENABLE_LTO) - add_cxx_compiler_flag(-flto) - if ("${CMAKE_C_COMPILER_ID}" STREQUAL "GNU") - find_program(GCC_AR gcc-ar) - if (GCC_AR) - set(CMAKE_AR ${GCC_AR}) - endif() - find_program(GCC_RANLIB gcc-ranlib) - if (GCC_RANLIB) - set(CMAKE_RANLIB ${GCC_RANLIB}) - endif() - elseif("${CMAKE_C_COMPILER_ID}" STREQUAL "Clang") - include(llvm-toolchain) - endif() - endif() - - # Coverage build type - set(BENCHMARK_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG}" - CACHE STRING "Flags used by the C++ compiler during coverage builds." - FORCE) - set(BENCHMARK_EXE_LINKER_FLAGS_COVERAGE "${CMAKE_EXE_LINKER_FLAGS_DEBUG}" - CACHE STRING "Flags used for linking binaries during coverage builds." - FORCE) - set(BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}" - CACHE STRING "Flags used by the shared libraries linker during coverage builds." 
- FORCE) - mark_as_advanced( - BENCHMARK_CXX_FLAGS_COVERAGE - BENCHMARK_EXE_LINKER_FLAGS_COVERAGE - BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE) - set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE}" CACHE STRING - "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Coverage.") - add_cxx_compiler_flag(--coverage COVERAGE) -endif() - -if (BENCHMARK_USE_LIBCXX) - if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") - add_cxx_compiler_flag(-stdlib=libc++) - elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" OR - "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel") - add_cxx_compiler_flag(-nostdinc++) - message("libc++ header path must be manually specified using CMAKE_CXX_FLAGS") - # Adding -nodefaultlibs directly to CMAKE__LINKER_FLAGS will break - # configuration checks such as 'find_package(Threads)' - list(APPEND BENCHMARK_CXX_LINKER_FLAGS -nodefaultlibs) - # -lc++ cannot be added directly to CMAKE__LINKER_FLAGS because - # linker flags appear before all linker inputs and -lc++ must appear after. - list(APPEND BENCHMARK_CXX_LIBRARIES c++) - else() - message(FATAL_ERROR "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported for compiler") - endif() -endif(BENCHMARK_USE_LIBCXX) - -# C++ feature checks -# Determine the correct regular expression engine to use -cxx_feature_check(STD_REGEX) -cxx_feature_check(GNU_POSIX_REGEX) -cxx_feature_check(POSIX_REGEX) -if(NOT HAVE_STD_REGEX AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX) - message(FATAL_ERROR "Failed to determine the source files for the regular expression backend") -endif() -if (NOT BENCHMARK_ENABLE_EXCEPTIONS AND HAVE_STD_REGEX - AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX) - message(WARNING "Using std::regex with exceptions disabled is not fully supported") -endif() -cxx_feature_check(STEADY_CLOCK) -# Ensure we have pthreads -find_package(Threads REQUIRED) - -# Set up directories -include_directories(${PROJECT_SOURCE_DIR}/include) - -# Build the targets -add_subdirectory(src) - -if (BENCHMARK_ENABLE_TESTING) - enable_testing() - if (BENCHMARK_ENABLE_GTEST_TESTS) - include(HandleGTest) - endif() - add_subdirectory(test) -endif() diff --git a/benchmarks/thirdparty/benchmark/CONTRIBUTING.md b/benchmarks/thirdparty/benchmark/CONTRIBUTING.md deleted file mode 100755 index 43de4c9d47..0000000000 --- a/benchmarks/thirdparty/benchmark/CONTRIBUTING.md +++ /dev/null @@ -1,58 +0,0 @@ -# How to contribute # - -We'd love to accept your patches and contributions to this project. There are -a just a few small guidelines you need to follow. - - -## Contributor License Agreement ## - -Contributions to any Google project must be accompanied by a Contributor -License Agreement. This is not a copyright **assignment**, it simply gives -Google permission to use and redistribute your contributions as part of the -project. - - * If you are an individual writing original source code and you're sure you - own the intellectual property, then you'll need to sign an [individual - CLA][]. - - * If you work for a company that wants to allow you to contribute your work, - then you'll need to sign a [corporate CLA][]. - -You generally only need to submit a CLA once, so if you've already submitted -one (even if it was for a different project), you probably don't need to do it -again. 
- -[individual CLA]: https://developers.google.com/open-source/cla/individual -[corporate CLA]: https://developers.google.com/open-source/cla/corporate - -Once your CLA is submitted (or if you already submitted one for -another Google project), make a commit adding yourself to the -[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part -of your first [pull request][]. - -[AUTHORS]: AUTHORS -[CONTRIBUTORS]: CONTRIBUTORS - - -## Submitting a patch ## - - 1. It's generally best to start by opening a new issue describing the bug or - feature you're intending to fix. Even if you think it's relatively minor, - it's helpful to know what people are working on. Mention in the initial - issue that you are planning to work on that bug or feature so that it can - be assigned to you. - - 1. Follow the normal process of [forking][] the project, and setup a new - branch to work in. It's important that each group of changes be done in - separate branches in order to ensure that a pull request only includes the - commits related to that bug or feature. - - 1. Do your best to have [well-formed commit messages][] for each change. - This provides consistency throughout the project, and ensures that commit - messages are able to be formatted properly by various git tools. - - 1. Finally, push the commits to your fork and submit a [pull request][]. - -[forking]: https://help.github.com/articles/fork-a-repo -[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html -[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/benchmarks/thirdparty/benchmark/CONTRIBUTORS b/benchmarks/thirdparty/benchmark/CONTRIBUTORS deleted file mode 100755 index 1cf04db17e..0000000000 --- a/benchmarks/thirdparty/benchmark/CONTRIBUTORS +++ /dev/null @@ -1,65 +0,0 @@ -# People who have agreed to one of the CLAs and can contribute patches. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# https://developers.google.com/open-source/cla/individual -# https://developers.google.com/open-source/cla/corporate -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. -# -# Names should be added to this file as: -# Name -# -# Please keep the list sorted. 
- -Albert Pretorius -Arne Beer -Billy Robert O'Neal III -Chris Kennelly -Christopher Seymour -David Coeurjolly -Deniz Evrenci -Dominic Hamon -Dominik Czarnota -Eric Fiselier -Eugene Zhuk -Evgeny Safronov -Felix Homann -Ismael Jimenez Martinez -Jern-Kuan Leong -JianXiong Zhou -Joao Paulo Magalhaes -John Millikin -Jussi Knuuttila -Kai Wolf -Kishan Kumar -Kaito Udagawa -Lei Xu -Matt Clarkson -Maxim Vafin -Nick Hutchinson -Oleksandr Sochka -Pascal Leroy -Paul Redmond -Pierre Phaneuf -Radoslav Yovchev -Raul Marin -Ray Glover -Robert Guo -Roman Lebedev -Shuo Chen -Tobias UlvgΓ₯rd -Tom Madams -Yixuan Qiu -Yusuke Suzuki -Zbigniew Skowron diff --git a/benchmarks/thirdparty/benchmark/LICENSE b/benchmarks/thirdparty/benchmark/LICENSE deleted file mode 100755 index d645695673..0000000000 --- a/benchmarks/thirdparty/benchmark/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/benchmarks/thirdparty/benchmark/README.md b/benchmarks/thirdparty/benchmark/README.md deleted file mode 100755 index 0341c31bd7..0000000000 --- a/benchmarks/thirdparty/benchmark/README.md +++ /dev/null @@ -1,950 +0,0 @@
-# benchmark
-[![Build Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/google/benchmark)
-[![Build status](https://ci.appveyor.com/api/projects/status/u0qsyp7t1tk7cpxs/branch/master?svg=true)](https://ci.appveyor.com/project/google/benchmark/branch/master)
-[![Coverage Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/benchmark)
-[![slackin](https://slackin-iqtfqnpzxd.now.sh/badge.svg)](https://slackin-iqtfqnpzxd.now.sh/)
-
-A library to support the benchmarking of functions, similar to unit tests.
-
-Discussion group: https://groups.google.com/d/forum/benchmark-discuss
-
-IRC channel: https://freenode.net #googlebenchmark
-
-[Known issues and common problems](#known-issues)
-
-[Additional Tooling Documentation](docs/tools.md)
-
-[Assembly Testing Documentation](docs/AssemblyTests.md)
-
-
-## Building
-
-The basic steps for configuring and building the library look like this:
-
-```bash
-$ git clone https://github.com/google/benchmark.git
-# Benchmark requires Google Test as a dependency. Add the source tree as a subdirectory.
-$ git clone https://github.com/google/googletest.git benchmark/googletest
-$ mkdir build && cd build
-$ cmake -G <generator> [options] ../benchmark
-# Assuming a makefile generator was used
-$ make
-```
-
-Note that Google Benchmark requires Google Test to build and run the tests. This
-dependency can be provided in two ways:
-
-* Check out the Google Test sources into `benchmark/googletest` as above.
-* Otherwise, if `-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON` is specified during
-  configuration, the library will automatically download and build any required
-  dependencies.
-
-If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF`
-to `CMAKE_ARGS`.
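To make the options above concrete, a one-shot configure invocation might look like the following sketch (the generator and relative path are placeholders; the two flags are the ones documented just above):

```bash
# Configure without the gtest-based unit tests, and let CMake fetch any
# remaining dependencies instead of requiring an in-tree googletest checkout.
$ cmake -G "Unix Makefiles" \
    -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON \
    -DBENCHMARK_ENABLE_GTEST_TESTS=OFF \
    ../benchmark
$ make
```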
-
-## Installation Guide
-
-For Ubuntu and Debian based systems
-
-First, make sure you have git and cmake installed (if not, please install them):
-
-```
-sudo apt-get install git
-sudo apt-get install cmake
-```
-
-Now, let's clone the repository and build it:
-
-```
-git clone https://github.com/google/benchmark.git
-cd benchmark
-git clone https://github.com/google/googletest.git
-mkdir build
-cd build
-cmake .. -DCMAKE_BUILD_TYPE=RELEASE
-make
-```
-
-We need to install the library globally now:
-
-```
-sudo make install
-```
-
-Now you have google/benchmark installed on your machine.
-Note: don't forget to link against the pthread library while building.
-
-## Stable and Experimental Library Versions
-
-The main branch contains the latest stable version of the benchmarking library;
-the API of which can be considered largely stable, with source-breaking changes
-being made only upon the release of a new major version.
-
-Newer, experimental features are implemented and tested on the
-[`v2` branch](https://github.com/google/benchmark/tree/v2). Users who wish
-to use, test, and provide feedback on the new features are encouraged to try
-this branch. However, this branch provides no stability guarantees and reserves
-the right to change and break the API at any time.
-
-## Prerequisite knowledge
-
-Before attempting to understand this framework one should ideally have some familiarity with the structure and format of the Google Test framework, upon which it is based. Documentation for Google Test, including a "Getting Started" (primer) guide, is available here:
-https://github.com/google/googletest/blob/master/googletest/docs/Documentation.md
-
-
-## Example usage
-### Basic usage
-Define a function that executes the code to be measured.
-
-```c++
-#include <benchmark/benchmark.h>
-
-static void BM_StringCreation(benchmark::State& state) {
-  for (auto _ : state)
-    std::string empty_string;
-}
-// Register the function as a benchmark
-BENCHMARK(BM_StringCreation);
-
-// Define another benchmark
-static void BM_StringCopy(benchmark::State& state) {
-  std::string x = "hello";
-  for (auto _ : state)
-    std::string copy(x);
-}
-BENCHMARK(BM_StringCopy);
-
-BENCHMARK_MAIN();
-```
-
-Don't forget to tell your linker to add the benchmark library, e.g. through the
-`-lbenchmark` compilation flag. Alternatively, you may leave out the
-`BENCHMARK_MAIN();` at the end of the source file and link against
-`-lbenchmark_main` to get the same default behavior.
-
-The benchmark library will report the timing for the code within the `for(...)` loop.
-
-### Passing arguments
-Sometimes a family of benchmarks can be implemented with just one routine that
-takes an extra argument to specify which one of the family of benchmarks to
-run. For example, the following code defines a family of benchmarks for
-measuring the speed of `memcpy()` calls of different lengths:
-
-```c++
-static void BM_memcpy(benchmark::State& state) {
-  char* src = new char[state.range(0)];
-  char* dst = new char[state.range(0)];
-  memset(src, 'x', state.range(0));
-  for (auto _ : state)
-    memcpy(dst, src, state.range(0));
-  state.SetBytesProcessed(int64_t(state.iterations()) *
-                          int64_t(state.range(0)));
-  delete[] src;
-  delete[] dst;
-}
-BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
-```
-
-The preceding code is quite repetitive, and can be replaced with the following
-short-hand. The following invocation will pick a few appropriate arguments in
-the specified range and will generate a benchmark for each such argument.
-
-```c++
-BENCHMARK(BM_memcpy)->Range(8, 8<<10);
-```
-
-By default the arguments in the range are generated in multiples of eight and
-the command above selects [ 8, 64, 512, 4k, 8k ]. In the following code the
-range multiplier is changed to multiples of two.
-
-```c++
-BENCHMARK(BM_memcpy)->RangeMultiplier(2)->Range(8, 8<<10);
-```
-Now the arguments generated are [ 8, 16, 32, 64, 128, 256, 512, 1024, 2k, 4k, 8k ].
-
-You might have a benchmark that depends on two or more inputs. For example, the
-following code defines a family of benchmarks for measuring the speed of set
-insertion.
-
-```c++
-static void BM_SetInsert(benchmark::State& state) {
-  std::set<int> data;
-  for (auto _ : state) {
-    state.PauseTiming();
-    data = ConstructRandomSet(state.range(0));
-    state.ResumeTiming();
-    for (int j = 0; j < state.range(1); ++j)
-      data.insert(RandomNumber());
-  }
-}
-BENCHMARK(BM_SetInsert)
-    ->Args({1<<10, 128})
-    ->Args({2<<10, 128})
-    ->Args({4<<10, 128})
-    ->Args({8<<10, 128})
-    ->Args({1<<10, 512})
-    ->Args({2<<10, 512})
-    ->Args({4<<10, 512})
-    ->Args({8<<10, 512});
-```
-
-The preceding code is quite repetitive, and can be replaced with the following
-short-hand. The following macro will pick a few appropriate arguments in the
-product of the two specified ranges and will generate a benchmark for each such
-pair.
-
-```c++
-BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
-```
-
-For more complex patterns of inputs, passing a custom function to `Apply` allows
-programmatic specification of an arbitrary set of arguments on which to run the
-benchmark. The following example enumerates a dense range on one parameter,
-and a sparse range on the second.
-
-```c++
-static void CustomArguments(benchmark::internal::Benchmark* b) {
-  for (int i = 0; i <= 10; ++i)
-    for (int j = 32; j <= 1024*1024; j *= 8)
-      b->Args({i, j});
-}
-BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
-```
-
-### Calculate asymptotic complexity (Big O)
-Asymptotic complexity might be calculated for a family of benchmarks. The
-following code will calculate the coefficient for the high-order term in the
-running time and the normalized root-mean-square error of string comparison.
-
-```c++
-static void BM_StringCompare(benchmark::State& state) {
-  std::string s1(state.range(0), '-');
-  std::string s2(state.range(0), '-');
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(s1.compare(s2));
-  }
-  state.SetComplexityN(state.range(0));
-}
-BENCHMARK(BM_StringCompare)
-    ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::oN);
-```
-
-As shown in the following invocation, asymptotic complexity might also be
-calculated automatically.
-
-```c++
-BENCHMARK(BM_StringCompare)
-    ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity();
-```
-
-The following code will specify asymptotic complexity with a lambda function,
-which can be used to customize the high-order term calculation.
-
-```c++
-BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
-    ->Range(1<<10, 1<<18)->Complexity([](int n)->double{return n; });
-```
-
-### Templated benchmarks
-Templated benchmarks work the same way: this example produces and consumes
-messages of size `sizeof(v)` `range_x` times. It also outputs throughput in the
-absence of multiprogramming.
-
-```c++
-template <class Q> int BM_Sequential(benchmark::State& state) {
-  Q q;
-  typename Q::value_type v;
-  for (auto _ : state) {
-    for (int i = state.range(0); i--; )
-      q.push(v);
-    for (int e = state.range(0); e--; )
-      q.Wait(&v);
-  }
-  // actually messages, not bytes:
-  state.SetBytesProcessed(
-      static_cast<int64_t>(state.iterations())*state.range(0));
-}
-BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
-```
-
-Three macros are provided for adding benchmark templates.
-
-```c++
-#ifdef BENCHMARK_HAS_CXX11
-#define BENCHMARK_TEMPLATE(func, ...) // Takes any number of parameters.
-#else // C++ < C++11
-#define BENCHMARK_TEMPLATE(func, arg1)
-#endif
-#define BENCHMARK_TEMPLATE1(func, arg1)
-#define BENCHMARK_TEMPLATE2(func, arg1, arg2)
-```
-
-### A Faster KeepRunning loop
-
-In C++11 mode, a range-based for loop should be used in preference to
-the `KeepRunning` loop for running the benchmarks. For example:
-
-```c++
-static void BM_Fast(benchmark::State &state) {
-  for (auto _ : state) {
-    FastOperation();
-  }
-}
-BENCHMARK(BM_Fast);
-```
-
-The reason the range-based for loop is faster than using `KeepRunning` is
-that `KeepRunning` requires a memory load and store of the iteration count
-every iteration, whereas the range-based variant is able to keep the iteration count
-in a register.
-
-For example, an empty inner loop using the range-based for method looks like:
-
-```asm
-# Loop Init
-  mov rbx, qword ptr [r14 + 104]
-  call benchmark::State::StartKeepRunning()
-  test rbx, rbx
-  je .LoopEnd
-.LoopHeader: # =>This Inner Loop Header: Depth=1
-  add rbx, -1
-  jne .LoopHeader
-.LoopEnd:
-```
-
-Compared to an empty `KeepRunning` loop, which looks like:
-
-```asm
-.LoopHeader: # in Loop: Header=BB0_3 Depth=1
-  cmp byte ptr [rbx], 1
-  jne .LoopInit
-.LoopBody: # =>This Inner Loop Header: Depth=1
-  mov rax, qword ptr [rbx + 8]
-  lea rcx, [rax + 1]
-  mov qword ptr [rbx + 8], rcx
-  cmp rax, qword ptr [rbx + 104]
-  jb .LoopHeader
-  jmp .LoopEnd
-.LoopInit:
-  mov rdi, rbx
-  call benchmark::State::StartKeepRunning()
-  jmp .LoopBody
-.LoopEnd:
```
-
-Unless C++03 compatibility is required, the range-based variant of writing
-the benchmark loop should be preferred.
-
-## Passing arbitrary arguments to a benchmark
-In C++11 it is possible to define a benchmark that takes an arbitrary number
-of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)`
-macro creates a benchmark that invokes `func` with the `benchmark::State` as
-the first argument followed by the specified `args...`.
-The `test_case_name` is appended to the name of the benchmark and
-should describe the values passed.
-
-```c++
-template <class ...ExtraArgs>
-void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
-  [...]
-}
-// Registers a benchmark named "BM_takes_args/int_string_test" that passes
-// the specified values to `extra_args`.
-BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
-```
-Note that elements of `...args` may refer to global variables. Users should
-avoid modifying global state inside of a benchmark.
-
-## Using RegisterBenchmark(name, fn, args...)
-
-The `RegisterBenchmark(name, func, args...)` function provides an alternative
-way to create and register benchmarks.
-`RegisterBenchmark(name, func, args...)` creates, registers, and returns a
-pointer to a new benchmark with the specified `name` that invokes
-`func(st, args...)` where `st` is a `benchmark::State` object.
-
-Unlike the `BENCHMARK` registration macros, which can only be used at the global
-scope, `RegisterBenchmark` can be called anywhere. This allows for
-benchmark tests to be registered programmatically.
-
-Additionally, `RegisterBenchmark` allows any callable object to be registered
-as a benchmark, including capturing lambdas and function objects.
-
-For example:
-```c++
-auto BM_test = [](benchmark::State& st, auto Inputs) { /* ... */ };
-
-int main(int argc, char** argv) {
-  for (auto& test_input : { /* ... */ })
-    benchmark::RegisterBenchmark(test_input.name(), BM_test, test_input);
-  benchmark::Initialize(&argc, argv);
-  benchmark::RunSpecifiedBenchmarks();
-}
-```
-
-### Multithreaded benchmarks
-In a multithreaded test (benchmark invoked by multiple threads simultaneously),
-it is guaranteed that none of the threads will start until all have reached
-the start of the benchmark loop, and all will have finished before any thread
-exits the benchmark loop. (This behavior is also provided by the `KeepRunning()`
-API.) As such, any global setup or teardown can be wrapped in a check against the thread
-index:
-
-```c++
-static void BM_MultiThreaded(benchmark::State& state) {
-  if (state.thread_index == 0) {
-    // Setup code here.
-  }
-  for (auto _ : state) {
-    // Run the test as normal.
-  }
-  if (state.thread_index == 0) {
-    // Teardown code here.
-  }
-}
-BENCHMARK(BM_MultiThreaded)->Threads(2);
-```
-
-If the benchmarked code itself uses threads and you want to compare it to
-single-threaded code, you may want to use real-time ("wallclock") measurements
-for latency comparisons:
-
-```c++
-BENCHMARK(BM_test)->Range(8, 8<<10)->UseRealTime();
-```
-
-Without `UseRealTime`, CPU time is used by default.
-
-
-## Manual timing
-For benchmarking something for which neither CPU time nor real time is
-correct or accurate enough, completely manual timing is supported using
-the `UseManualTime` function.
-
-When `UseManualTime` is used, the benchmarked code must call
-`SetIterationTime` once per iteration of the benchmark loop to
-report the manually measured time.
-
-An example use case for this is benchmarking GPU execution (e.g. OpenCL
-or CUDA kernels, OpenGL or Vulkan or Direct3D draw calls), which cannot
-be accurately measured using CPU time or real time. Instead, they can be
-measured accurately using a dedicated API, and these measurement results
-can be reported back with `SetIterationTime`.
-
-```c++
-static void BM_ManualTiming(benchmark::State& state) {
-  int microseconds = state.range(0);
-  std::chrono::duration<double, std::micro> sleep_duration {
-    static_cast<double>(microseconds)
-  };
-
-  for (auto _ : state) {
-    auto start = std::chrono::high_resolution_clock::now();
-    // Simulate some useful workload with a sleep
-    std::this_thread::sleep_for(sleep_duration);
-    auto end = std::chrono::high_resolution_clock::now();
-
-    auto elapsed_seconds =
-        std::chrono::duration_cast<std::chrono::duration<double>>(
-            end - start);
-
-    state.SetIterationTime(elapsed_seconds.count());
-  }
-}
-BENCHMARK(BM_ManualTiming)->Range(1, 1<<17)->UseManualTime();
-```
-
-### Preventing optimisation
-To prevent a value or expression from being optimized away by the compiler,
-the `benchmark::DoNotOptimize(...)` and `benchmark::ClobberMemory()`
-functions can be used.
-
-```c++
-static void BM_test(benchmark::State& state) {
-  for (auto _ : state) {
-    int x = 0;
-    for (int i=0; i < 64; ++i) {
-      benchmark::DoNotOptimize(x += i);
-    }
-  }
-}
-```
-
-`DoNotOptimize(<expr>)` forces the *result* of `<expr>` to be stored in either
-memory or a register.
For GNU-based compilers it acts as a read/write barrier
-for global memory. More specifically, it forces the compiler to flush pending
-writes to memory and reload any other values as necessary.
-
-Note that `DoNotOptimize(<expr>)` does not prevent optimizations on `<expr>`
-in any way. `<expr>` may even be removed entirely when the result is already
-known. For example:
-
-```c++
-  /* Example 1: `<expr>` is removed entirely. */
-  int foo(int x) { return x + 42; }
-  while (...) DoNotOptimize(foo(0)); // Optimized to DoNotOptimize(42);
-
-  /* Example 2: Result of `<expr>` is only reused */
-  int bar(int) __attribute__((const));
-  while (...) DoNotOptimize(bar(0)); // Optimized to:
-  // int __result__ = bar(0);
-  // while (...) DoNotOptimize(__result__);
-```
-
-The second tool for preventing optimizations is `ClobberMemory()`. In essence,
-`ClobberMemory()` forces the compiler to perform all pending writes to global
-memory. Memory managed by block-scope objects must be "escaped" using
-`DoNotOptimize(...)` before it can be clobbered. In the example below,
-`ClobberMemory()` prevents the call to `v.push_back(42)` from being optimized
-away.
-
-```c++
-static void BM_vector_push_back(benchmark::State& state) {
-  for (auto _ : state) {
-    std::vector<int> v;
-    v.reserve(1);
-    benchmark::DoNotOptimize(v.data()); // Allow v.data() to be clobbered.
-    v.push_back(42);
-    benchmark::ClobberMemory(); // Force 42 to be written to memory.
-  }
-}
-```
-
-Note that `ClobberMemory()` is only available for GNU- or MSVC-based compilers.
-
-### Set time unit manually
-If a benchmark runs for a few milliseconds, it may be hard to visually compare the
-measured times, since the output data is given in nanoseconds by default. To
-set the time unit manually, specify it on the benchmark:
-
-```c++
-BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
-```
-
-## Controlling number of iterations
-In all cases, the number of iterations for which the benchmark is run is
-governed by the amount of time the benchmark takes. Concretely, the number of
-iterations is at least one, not more than 1e9, until CPU time is greater than
-the minimum time, or the wallclock time is 5x the minimum time. The minimum time is
-set via the `--benchmark_min_time` flag or per benchmark by calling `MinTime` on
-the registered benchmark object.
-
-## Reporting the mean, median and standard deviation by repeated benchmarks
-By default each benchmark is run once and that single result is reported.
-However, benchmarks are often noisy and a single result may not be representative
-of the overall behavior. For this reason it's possible to repeatedly rerun the
-benchmark.
-
-The number of runs of each benchmark is specified globally by the
-`--benchmark_repetitions` flag or on a per-benchmark basis by calling
-`Repetitions` on the registered benchmark object. When a benchmark is run more
-than once, the mean, median and standard deviation of the runs will be reported.
-
-Additionally, the `--benchmark_report_aggregates_only={true|false}` flag or
-`ReportAggregatesOnly(bool)` function can be used to change how repeated tests
-are reported. By default the result of each repeated run is reported. When this
-option is `true`, only the mean, median and standard deviation of the runs are reported.
-Calling `ReportAggregatesOnly(bool)` on a registered benchmark object overrides
-the value of the flag for that benchmark.
-
-## User-defined statistics for repeated benchmarks
-While having the mean, median and standard deviation is nice, this may not be
-enough for everyone.
For example, you may want to know the largest
-observation, e.g. because you have some real-time constraints. This is easy.
-The following code will specify a custom statistic to be calculated, defined
-by a lambda function.
-
-```c++
-void BM_spin_empty(benchmark::State& state) {
-  for (auto _ : state) {
-    for (int x = 0; x < state.range(0); ++x) {
-      benchmark::DoNotOptimize(x);
-    }
-  }
-}
-
-BENCHMARK(BM_spin_empty)
-  ->ComputeStatistics("max", [](const std::vector<double>& v) -> double {
-    return *(std::max_element(std::begin(v), std::end(v)));
-  })
-  ->Arg(512);
-```
-
-## Fixtures
-Fixture tests are created by
-first defining a type that derives from `::benchmark::Fixture` and then
-creating/registering the tests using the following macros:
-
-* `BENCHMARK_F(ClassName, Method)`
-* `BENCHMARK_DEFINE_F(ClassName, Method)`
-* `BENCHMARK_REGISTER_F(ClassName, Method)`
-
-For example:
-
-```c++
-class MyFixture : public benchmark::Fixture {};
-
-BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
-  for (auto _ : st) {
-    ...
-  }
-}
-
-BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) {
-  for (auto _ : st) {
-    ...
-  }
-}
-/* BarTest is NOT registered */
-BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
-/* BarTest is now registered */
-```
-
-### Templated fixtures
-You can also create templated fixtures by using the following macros:
-
-* `BENCHMARK_TEMPLATE_F(ClassName, Method, ...)`
-* `BENCHMARK_TEMPLATE_DEFINE_F(ClassName, Method, ...)`
-
-For example:
-```c++
-template <typename T>
-class MyFixture : public benchmark::Fixture {};
-
-BENCHMARK_TEMPLATE_F(MyFixture, IntTest, int)(benchmark::State& st) {
-  for (auto _ : st) {
-    ...
-  }
-}
-
-BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, DoubleTest, double)(benchmark::State& st) {
-  for (auto _ : st) {
-    ...
-  }
-}
-
-BENCHMARK_REGISTER_F(MyFixture, DoubleTest)->Threads(2);
-```
-
-## User-defined counters
-
-You can add your own counters with user-defined names. The example below
-will add columns "Foo", "Bar" and "Baz" to its output:
-
-```c++
-static void UserCountersExample1(benchmark::State& state) {
-  double numFoos = 0, numBars = 0, numBazs = 0;
-  for (auto _ : state) {
-    // ... count Foo,Bar,Baz events
-  }
-  state.counters["Foo"] = numFoos;
-  state.counters["Bar"] = numBars;
-  state.counters["Baz"] = numBazs;
-}
-```
-
-The `state.counters` object is a `std::map` with `std::string` keys
-and `Counter` values. The latter is a `double`-like class, via an implicit
-conversion to `double&`. Thus you can use all of the standard arithmetic
-assignment operators (`=,+=,-=,*=,/=`) to change the value of each counter.
-
-In multithreaded benchmarks, each counter is set on the calling thread only.
-When the benchmark finishes, the counters from each thread will be summed;
-the resulting sum is the value which will be shown for the benchmark.
-
-The `Counter` constructor accepts two parameters: the value as a `double`
-and a bit flag which allows you to show counters as rates and/or as
-per-thread averages:
-
-```c++
-  // sets a simple counter
-  state.counters["Foo"] = numFoos;
-
-  // Set the counter as a rate. It will be presented divided
-  // by the duration of the benchmark.
-  state.counters["FooRate"] = Counter(numFoos, benchmark::Counter::kIsRate);
-
-  // Set the counter as a thread-average quantity. It will
-  // be presented divided by the number of threads.
- state.counters["FooAvg"] = Counter(numFoos, benchmark::Counter::kAvgThreads); - - // There's also a combined flag: - state.counters["FooAvgRate"] = Counter(numFoos,benchmark::Counter::kAvgThreadsRate); -``` - -When you're compiling in C++11 mode or later you can use `insert()` with -`std::initializer_list`: - -```c++ - // With C++11, this can be done: - state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}, {"Baz", numBazs}}); - // ... instead of: - state.counters["Foo"] = numFoos; - state.counters["Bar"] = numBars; - state.counters["Baz"] = numBazs; -``` - -### Counter reporting - -When using the console reporter, by default, user counters are are printed at -the end after the table, the same way as ``bytes_processed`` and -``items_processed``. This is best for cases in which there are few counters, -or where there are only a couple of lines per benchmark. Here's an example of -the default output: - -``` ------------------------------------------------------------------------------- -Benchmark Time CPU Iterations UserCounters... ------------------------------------------------------------------------------- -BM_UserCounter/threads:8 2248 ns 10277 ns 68808 Bar=16 Bat=40 Baz=24 Foo=8 -BM_UserCounter/threads:1 9797 ns 9788 ns 71523 Bar=2 Bat=5 Baz=3 Foo=1024m -BM_UserCounter/threads:2 4924 ns 9842 ns 71036 Bar=4 Bat=10 Baz=6 Foo=2 -BM_UserCounter/threads:4 2589 ns 10284 ns 68012 Bar=8 Bat=20 Baz=12 Foo=4 -BM_UserCounter/threads:8 2212 ns 10287 ns 68040 Bar=16 Bat=40 Baz=24 Foo=8 -BM_UserCounter/threads:16 1782 ns 10278 ns 68144 Bar=32 Bat=80 Baz=48 Foo=16 -BM_UserCounter/threads:32 1291 ns 10296 ns 68256 Bar=64 Bat=160 Baz=96 Foo=32 -BM_UserCounter/threads:4 2615 ns 10307 ns 68040 Bar=8 Bat=20 Baz=12 Foo=4 -BM_Factorial 26 ns 26 ns 26608979 40320 -BM_Factorial/real_time 26 ns 26 ns 26587936 40320 -BM_CalculatePiRange/1 16 ns 16 ns 45704255 0 -BM_CalculatePiRange/8 73 ns 73 ns 9520927 3.28374 -BM_CalculatePiRange/64 609 ns 609 ns 1140647 3.15746 -BM_CalculatePiRange/512 4900 ns 4901 ns 142696 3.14355 -``` - -If this doesn't suit you, you can print each counter as a table column by -passing the flag `--benchmark_counters_tabular=true` to the benchmark -application. This is best for cases in which there are a lot of counters, or -a lot of lines per individual benchmark. Note that this will trigger a -reprinting of the table header any time the counter set changes between -individual benchmarks. 
Here's an example of corresponding output when
-`--benchmark_counters_tabular=true` is passed:
-
-```
---------------------------------------------------------------------------------------
-Benchmark                        Time           CPU Iterations    Bar   Bat   Baz   Foo
---------------------------------------------------------------------------------------
-BM_UserCounter/threads:8      2198 ns       9953 ns      70688     16    40    24     8
-BM_UserCounter/threads:1      9504 ns       9504 ns      73787      2     5     3     1
-BM_UserCounter/threads:2      4775 ns       9550 ns      72606      4    10     6     2
-BM_UserCounter/threads:4      2508 ns       9951 ns      70332      8    20    12     4
-BM_UserCounter/threads:8      2055 ns       9933 ns      70344     16    40    24     8
-BM_UserCounter/threads:16     1610 ns       9946 ns      70720     32    80    48    16
-BM_UserCounter/threads:32     1192 ns       9948 ns      70496     64   160    96    32
-BM_UserCounter/threads:4      2506 ns       9949 ns      70332      8    20    12     4
---------------------------------------------------------------
-Benchmark                        Time           CPU Iterations
---------------------------------------------------------------
-BM_Factorial                    26 ns         26 ns   26392245 40320
-BM_Factorial/real_time          26 ns         26 ns   26494107 40320
-BM_CalculatePiRange/1           15 ns         15 ns   45571597 0
-BM_CalculatePiRange/8           74 ns         74 ns    9450212 3.28374
-BM_CalculatePiRange/64         595 ns        595 ns    1173901 3.15746
-BM_CalculatePiRange/512       4752 ns       4752 ns     147380 3.14355
-BM_CalculatePiRange/4k       37970 ns      37972 ns      18453 3.14184
-BM_CalculatePiRange/32k     303733 ns     303744 ns       2305 3.14162
-BM_CalculatePiRange/256k   2434095 ns    2434186 ns        288 3.1416
-BM_CalculatePiRange/1024k  9721140 ns    9721413 ns         71 3.14159
-BM_CalculatePi/threads:8      2255 ns       9943 ns      70936
-```
-Note above the additional header printed when the benchmark changes from
-``BM_UserCounter`` to ``BM_Factorial``. This is because ``BM_Factorial`` does
-not have the same counter set as ``BM_UserCounter``.
-
-## Exiting Benchmarks in Error
-
-When errors caused by external influences, such as file I/O and network
-communication, occur within a benchmark, the
-`State::SkipWithError(const char* msg)` function can be used to skip that run
-of the benchmark and report the error. Note that only future iterations of the
-`KeepRunning()` loop are skipped. For the range-based for version of the benchmark loop,
-users must explicitly exit the loop; otherwise all iterations will be performed.
-Users may explicitly return to exit the benchmark immediately.
-
-The `SkipWithError(...)` function may be used at any point within the benchmark,
-including before and after the benchmark loop.
-
-For example:
-
-```c++
-static void BM_test(benchmark::State& state) {
-  auto resource = GetResource();
-  if (!resource.good()) {
-    state.SkipWithError("Resource is not good!");
-    // KeepRunning() loop will not be entered.
-  }
-  while (state.KeepRunning()) {
-    auto data = resource.read_data();
-    if (!resource.good()) {
-      state.SkipWithError("Failed to read data!");
-      break; // Needed to skip the rest of the iteration.
-    }
-    do_stuff(data);
-  }
-}
-
-static void BM_test_ranged_fo(benchmark::State & state) {
-  state.SkipWithError("test will not be entered");
-  for (auto _ : state) {
-    state.SkipWithError("Failed!");
-    break; // REQUIRED to prevent all further iterations.
-  }
-}
-```
-
-## Running a subset of the benchmarks
-
-The `--benchmark_filter=<regex>` option can be used to only run the benchmarks
-which match the specified `<regex>`.
For example:
-
-```bash
-$ ./run_benchmarks.x --benchmark_filter=BM_memcpy/32
-Run on (1 X 2300 MHz CPU )
-2016-06-25 19:34:24
-Benchmark              Time           CPU Iterations
-----------------------------------------------------
-BM_memcpy/32          11 ns         11 ns   79545455
-BM_memcpy/32k       2181 ns       2185 ns     324074
-BM_memcpy/32          12 ns         12 ns   54687500
-BM_memcpy/32k       1834 ns       1837 ns     357143
-```
-
-
-## Output Formats
-The library supports multiple output formats. Use the
-`--benchmark_format=<console|json|csv>` flag to set the format type. `console`
-is the default format.
-
-The Console format is intended to be a human readable format. By default
-the format generates color output. Context is output on stderr and the
-tabular data on stdout. Example tabular output looks like:
-```
-Benchmark                               Time(ns)    CPU(ns) Iterations
-----------------------------------------------------------------------
-BM_SetInsert/1024/1                        28928      29349      23853  133.097kB/s  33.2742k items/s
-BM_SetInsert/1024/8                        32065      32913      21375  949.487kB/s  237.372k items/s
-BM_SetInsert/1024/10                       33157      33648      21431  1.13369MB/s  290.225k items/s
-```
-
-The JSON format outputs human-readable JSON split into two top-level attributes.
-The `context` attribute contains information about the run in general, including
-information about the CPU and the date.
-The `benchmarks` attribute contains a list of every benchmark run. Example JSON
-output looks like:
-```json
-{
-  "context": {
-    "date": "2015/03/17-18:40:25",
-    "num_cpus": 40,
-    "mhz_per_cpu": 2801,
-    "cpu_scaling_enabled": false,
-    "build_type": "debug"
-  },
-  "benchmarks": [
-    {
-      "name": "BM_SetInsert/1024/1",
-      "iterations": 94877,
-      "real_time": 29275,
-      "cpu_time": 29836,
-      "bytes_per_second": 134066,
-      "items_per_second": 33516
-    },
-    {
-      "name": "BM_SetInsert/1024/8",
-      "iterations": 21609,
-      "real_time": 32317,
-      "cpu_time": 32429,
-      "bytes_per_second": 986770,
-      "items_per_second": 246693
-    },
-    {
-      "name": "BM_SetInsert/1024/10",
-      "iterations": 21393,
-      "real_time": 32724,
-      "cpu_time": 33355,
-      "bytes_per_second": 1199226,
-      "items_per_second": 299807
-    }
-  ]
-}
-```
-
-The CSV format outputs comma-separated values. The `context` is output on stderr
-and the CSV itself on stdout. Example CSV output looks like:
-```
-name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label
-"BM_SetInsert/1024/1",65465,17890.7,8407.45,475768,118942,
-"BM_SetInsert/1024/8",116606,18810.1,9766.64,3.27646e+06,819115,
-"BM_SetInsert/1024/10",106365,17238.4,8421.53,4.74973e+06,1.18743e+06,
-```
-
-## Output Files
-The library supports writing the output of the benchmark to a file specified
-by `--benchmark_out=<filename>`. The format of the output can be specified
-using `--benchmark_out_format={json|console|csv}`. Specifying
-`--benchmark_out` does not suppress the console output.
-
-## Debug vs Release
-By default, benchmark builds as a debug library. You will see a warning in
-the output when this is the case. To build it as a release library instead,
-use:
-
-```
-cmake -DCMAKE_BUILD_TYPE=Release
-```
-
-To enable link-time optimisation, use
-
-```
-cmake -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_LTO=true
-```
-
-If you are using gcc, you might need to set the `GCC_AR` and `GCC_RANLIB`
-cmake cache variables if autodetection fails.
-If you are using clang, you may need to set the `LLVMAR_EXECUTABLE`,
-`LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
-
-## Linking against the library
-
-When the library is built using GCC it is necessary to link with `-pthread`,
-due to how GCC implements `std::thread`.
- -For GCC 4.x failing to link to pthreads will lead to runtime exceptions, not linker errors. -See [issue #67](https://github.com/google/benchmark/issues/67) for more details. - -## Compiler Support - -Google Benchmark uses C++11 when building the library. As such we require -a modern C++ toolchain, both compiler and standard library. - -The following minimum versions are strongly recommended build the library: - -* GCC 4.8 -* Clang 3.4 -* Visual Studio 2013 -* Intel 2015 Update 1 - -Anything older *may* work. - -Note: Using the library and its headers in C++03 is supported. C++11 is only -required to build the library. - -## Disable CPU frequency scaling -If you see this error: -``` -***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will incur extra overhead. -``` -you might want to disable the CPU frequency scaling while running the benchmark: -```bash -sudo cpupower frequency-set --governor performance -./mybench -sudo cpupower frequency-set --governor powersave -``` - -# Known Issues - -### Windows with CMake - -* Users must manually link `shlwapi.lib`. Failure to do so may result -in unresolved symbols. - -### Solaris - -* Users must explicitly link with kstat library (-lkstat compilation flag). diff --git a/benchmarks/thirdparty/benchmark/WORKSPACE b/benchmarks/thirdparty/benchmark/WORKSPACE deleted file mode 100755 index 54734f1ea5..0000000000 --- a/benchmarks/thirdparty/benchmark/WORKSPACE +++ /dev/null @@ -1,7 +0,0 @@ -workspace(name = "com_github_google_benchmark") - -http_archive( - name = "com_google_googletest", - urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"], - strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e", -) diff --git a/benchmarks/thirdparty/benchmark/appveyor.yml b/benchmarks/thirdparty/benchmark/appveyor.yml deleted file mode 100755 index e99c6e77f0..0000000000 --- a/benchmarks/thirdparty/benchmark/appveyor.yml +++ /dev/null @@ -1,56 +0,0 @@ -version: '{build}' - -image: Visual Studio 2017 - -configuration: - - Debug - - Release - -environment: - matrix: - - compiler: msvc-15-seh - generator: "Visual Studio 15 2017" - - - compiler: msvc-15-seh - generator: "Visual Studio 15 2017 Win64" - - - compiler: msvc-14-seh - generator: "Visual Studio 14 2015" - - - compiler: msvc-14-seh - generator: "Visual Studio 14 2015 Win64" - - - compiler: msvc-12-seh - generator: "Visual Studio 12 2013" - - - compiler: msvc-12-seh - generator: "Visual Studio 12 2013 Win64" - - - compiler: gcc-5.3.0-posix - generator: "MinGW Makefiles" - cxx_path: 'C:\mingw-w64\i686-5.3.0-posix-dwarf-rt_v4-rev0\mingw32\bin' - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 - -matrix: - fast_finish: true - -install: - # git bash conflicts with MinGW makefiles - - if "%generator%"=="MinGW Makefiles" (set "PATH=%PATH:C:\Program Files\Git\usr\bin;=%") - - if not "%cxx_path%"=="" (set "PATH=%PATH%;%cxx_path%") - -build_script: - - md _build -Force - - cd _build - - echo %configuration% - - cmake -G "%generator%" "-DCMAKE_BUILD_TYPE=%configuration%" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON .. - - cmake --build . 
--config %configuration% - -test_script: - - ctest -c %configuration% --timeout 300 --output-on-failure - -artifacts: - - path: '_build/CMakeFiles/*.log' - name: logs - - path: '_build/Testing/**/*.xml' - name: test_results diff --git a/benchmarks/thirdparty/benchmark/cmake/AddCXXCompilerFlag.cmake b/benchmarks/thirdparty/benchmark/cmake/AddCXXCompilerFlag.cmake deleted file mode 100755 index d0d2099814..0000000000 --- a/benchmarks/thirdparty/benchmark/cmake/AddCXXCompilerFlag.cmake +++ /dev/null @@ -1,74 +0,0 @@ -# - Adds a compiler flag if it is supported by the compiler -# -# This function checks that the supplied compiler flag is supported and then -# adds it to the corresponding compiler flags -# -# add_cxx_compiler_flag( []) -# -# - Example -# -# include(AddCXXCompilerFlag) -# add_cxx_compiler_flag(-Wall) -# add_cxx_compiler_flag(-no-strict-aliasing RELEASE) -# Requires CMake 2.6+ - -if(__add_cxx_compiler_flag) - return() -endif() -set(__add_cxx_compiler_flag INCLUDED) - -include(CheckCXXCompilerFlag) - -function(mangle_compiler_flag FLAG OUTPUT) - string(TOUPPER "HAVE_CXX_FLAG_${FLAG}" SANITIZED_FLAG) - string(REPLACE "+" "X" SANITIZED_FLAG ${SANITIZED_FLAG}) - string(REGEX REPLACE "[^A-Za-z_0-9]" "_" SANITIZED_FLAG ${SANITIZED_FLAG}) - string(REGEX REPLACE "_+" "_" SANITIZED_FLAG ${SANITIZED_FLAG}) - set(${OUTPUT} "${SANITIZED_FLAG}" PARENT_SCOPE) -endfunction(mangle_compiler_flag) - -function(add_cxx_compiler_flag FLAG) - mangle_compiler_flag("${FLAG}" MANGLED_FLAG) - set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") - set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}") - check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) - set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") - if(${MANGLED_FLAG}) - set(VARIANT ${ARGV1}) - if(ARGV1) - string(TOUPPER "_${VARIANT}" VARIANT) - endif() - set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${BENCHMARK_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) - endif() -endfunction() - -function(add_required_cxx_compiler_flag FLAG) - mangle_compiler_flag("${FLAG}" MANGLED_FLAG) - set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") - set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}") - check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) - set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") - if(${MANGLED_FLAG}) - set(VARIANT ${ARGV1}) - if(ARGV1) - string(TOUPPER "_${VARIANT}" VARIANT) - endif() - set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) - set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) - set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) - set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}" PARENT_SCOPE) - else() - message(FATAL_ERROR "Required flag '${FLAG}' is not supported by the compiler") - endif() -endfunction() - -function(check_cxx_warning_flag FLAG) - mangle_compiler_flag("${FLAG}" MANGLED_FLAG) - set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") - # Add -Werror to ensure the compiler generates an error if the warning flag - # doesn't exist. 
- set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -Werror ${FLAG}") - check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) - set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") -endfunction() diff --git a/benchmarks/thirdparty/benchmark/cmake/CXXFeatureCheck.cmake b/benchmarks/thirdparty/benchmark/cmake/CXXFeatureCheck.cmake deleted file mode 100755 index c4c4d660f1..0000000000 --- a/benchmarks/thirdparty/benchmark/cmake/CXXFeatureCheck.cmake +++ /dev/null @@ -1,64 +0,0 @@ -# - Compile and run code to check for C++ features -# -# This functions compiles a source file under the `cmake` folder -# and adds the corresponding `HAVE_[FILENAME]` flag to the CMake -# environment -# -# cxx_feature_check( []) -# -# - Example -# -# include(CXXFeatureCheck) -# cxx_feature_check(STD_REGEX) -# Requires CMake 2.8.12+ - -if(__cxx_feature_check) - return() -endif() -set(__cxx_feature_check INCLUDED) - -function(cxx_feature_check FILE) - string(TOLOWER ${FILE} FILE) - string(TOUPPER ${FILE} VAR) - string(TOUPPER "HAVE_${VAR}" FEATURE) - if (DEFINED HAVE_${VAR}) - set(HAVE_${VAR} 1 PARENT_SCOPE) - add_definitions(-DHAVE_${VAR}) - return() - endif() - - if (NOT DEFINED COMPILE_${FEATURE}) - message("-- Performing Test ${FEATURE}") - if(CMAKE_CROSSCOMPILING) - try_compile(COMPILE_${FEATURE} - ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp - CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} - LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) - if(COMPILE_${FEATURE}) - message(WARNING - "If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0") - set(RUN_${FEATURE} 0) - else() - set(RUN_${FEATURE} 1) - endif() - else() - message("-- Performing Test ${FEATURE}") - try_run(RUN_${FEATURE} COMPILE_${FEATURE} - ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp - CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} - LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) - endif() - endif() - - if(RUN_${FEATURE} EQUAL 0) - message("-- Performing Test ${FEATURE} -- success") - set(HAVE_${VAR} 1 PARENT_SCOPE) - add_definitions(-DHAVE_${VAR}) - else() - if(NOT COMPILE_${FEATURE}) - message("-- Performing Test ${FEATURE} -- failed to compile") - else() - message("-- Performing Test ${FEATURE} -- compiled but failed to run") - endif() - endif() -endfunction() diff --git a/benchmarks/thirdparty/benchmark/cmake/Config.cmake.in b/benchmarks/thirdparty/benchmark/cmake/Config.cmake.in deleted file mode 100755 index 6e9256eea8..0000000000 --- a/benchmarks/thirdparty/benchmark/cmake/Config.cmake.in +++ /dev/null @@ -1 +0,0 @@ -include("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake") diff --git a/benchmarks/thirdparty/benchmark/cmake/GetGitVersion.cmake b/benchmarks/thirdparty/benchmark/cmake/GetGitVersion.cmake deleted file mode 100755 index 88cebe3a1c..0000000000 --- a/benchmarks/thirdparty/benchmark/cmake/GetGitVersion.cmake +++ /dev/null @@ -1,54 +0,0 @@ -# - Returns a version string from Git tags -# -# This function inspects the annotated git tags for the project and returns a string -# into a CMake variable -# -# get_git_version() -# -# - Example -# -# include(GetGitVersion) -# get_git_version(GIT_VERSION) -# -# Requires CMake 2.8.11+ -find_package(Git) - -if(__get_git_version) - return() -endif() -set(__get_git_version INCLUDED) - -function(get_git_version var) - if(GIT_EXECUTABLE) - execute_process(COMMAND ${GIT_EXECUTABLE} describe --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8 - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} - RESULT_VARIABLE status - OUTPUT_VARIABLE GIT_VERSION - ERROR_QUIET) - 
if(${status}) - set(GIT_VERSION "v0.0.0") - else() - string(STRIP ${GIT_VERSION} GIT_VERSION) - string(REGEX REPLACE "-[0-9]+-g" "-" GIT_VERSION ${GIT_VERSION}) - endif() - - # Work out if the repository is dirty - execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} - OUTPUT_QUIET - ERROR_QUIET) - execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD -- - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} - OUTPUT_VARIABLE GIT_DIFF_INDEX - ERROR_QUIET) - string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY) - if (${GIT_DIRTY}) - set(GIT_VERSION "${GIT_VERSION}-dirty") - endif() - else() - set(GIT_VERSION "v0.0.0") - endif() - - message("-- git Version: ${GIT_VERSION}") - set(${var} ${GIT_VERSION} PARENT_SCOPE) -endfunction() diff --git a/benchmarks/thirdparty/benchmark/cmake/HandleGTest.cmake b/benchmarks/thirdparty/benchmark/cmake/HandleGTest.cmake deleted file mode 100755 index 7ce1a633d6..0000000000 --- a/benchmarks/thirdparty/benchmark/cmake/HandleGTest.cmake +++ /dev/null @@ -1,113 +0,0 @@ - -include(split_list) - -macro(build_external_gtest) - include(ExternalProject) - set(GTEST_FLAGS "") - if (BENCHMARK_USE_LIBCXX) - if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") - list(APPEND GTEST_FLAGS -stdlib=libc++) - else() - message(WARNING "Unsupported compiler (${CMAKE_CXX_COMPILER}) when using libc++") - endif() - endif() - if (BENCHMARK_BUILD_32_BITS) - list(APPEND GTEST_FLAGS -m32) - endif() - if (NOT "${CMAKE_CXX_FLAGS}" STREQUAL "") - list(APPEND GTEST_FLAGS ${CMAKE_CXX_FLAGS}) - endif() - string(TOUPPER "${CMAKE_BUILD_TYPE}" GTEST_BUILD_TYPE) - if ("${GTEST_BUILD_TYPE}" STREQUAL "COVERAGE") - set(GTEST_BUILD_TYPE "DEBUG") - endif() - # FIXME: Since 10/Feb/2017 the googletest trunk has had a bug where - # -Werror=unused-function fires during the build on OS X. This is a temporary - # workaround to keep our travis bots from failing. It should be removed - # once gtest is fixed. - if (NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") - list(APPEND GTEST_FLAGS "-Wno-unused-function") - endif() - split_list(GTEST_FLAGS) - set(EXCLUDE_FROM_ALL_OPT "") - set(EXCLUDE_FROM_ALL_VALUE "") - if (${CMAKE_VERSION} VERSION_GREATER "3.0.99") - set(EXCLUDE_FROM_ALL_OPT "EXCLUDE_FROM_ALL") - set(EXCLUDE_FROM_ALL_VALUE "ON") - endif() - ExternalProject_Add(googletest - ${EXCLUDE_FROM_ALL_OPT} ${EXCLUDE_FROM_ALL_VALUE} - GIT_REPOSITORY https://github.com/google/googletest.git - GIT_TAG master - PREFIX "${CMAKE_BINARY_DIR}/googletest" - INSTALL_DIR "${CMAKE_BINARY_DIR}/googletest" - CMAKE_CACHE_ARGS - -DCMAKE_BUILD_TYPE:STRING=${GTEST_BUILD_TYPE} - -DCMAKE_C_COMPILER:STRING=${CMAKE_C_COMPILER} - -DCMAKE_CXX_COMPILER:STRING=${CMAKE_CXX_COMPILER} - -DCMAKE_INSTALL_PREFIX:PATH= - -DCMAKE_INSTALL_LIBDIR:PATH=/lib - -DCMAKE_CXX_FLAGS:STRING=${GTEST_FLAGS} - -Dgtest_force_shared_crt:BOOL=ON - ) - - ExternalProject_Get_Property(googletest install_dir) - set(GTEST_INCLUDE_DIRS ${install_dir}/include) - file(MAKE_DIRECTORY ${GTEST_INCLUDE_DIRS}) - - set(LIB_SUFFIX "${CMAKE_STATIC_LIBRARY_SUFFIX}") - set(LIB_PREFIX "${CMAKE_STATIC_LIBRARY_PREFIX}") - if("${GTEST_BUILD_TYPE}" STREQUAL "DEBUG") - set(LIB_SUFFIX "d${CMAKE_STATIC_LIBRARY_SUFFIX}") - endif() - - # Use gmock_main instead of gtest_main because it initializes gtest as well. - # Note: The libraries are listed in reverse order of their dependancies. 
- foreach(LIB gtest gmock gmock_main) - add_library(${LIB} UNKNOWN IMPORTED) - set_target_properties(${LIB} PROPERTIES - IMPORTED_LOCATION ${install_dir}/lib/${LIB_PREFIX}${LIB}${LIB_SUFFIX} - INTERFACE_INCLUDE_DIRECTORIES ${GTEST_INCLUDE_DIRS} - INTERFACE_LINK_LIBRARIES "${GTEST_BOTH_LIBRARIES}" - ) - add_dependencies(${LIB} googletest) - list(APPEND GTEST_BOTH_LIBRARIES ${LIB}) - endforeach() -endmacro(build_external_gtest) - -if (BENCHMARK_ENABLE_GTEST_TESTS) - if (IS_DIRECTORY ${CMAKE_SOURCE_DIR}/googletest) - set(GTEST_ROOT "${CMAKE_SOURCE_DIR}/googletest") - set(INSTALL_GTEST OFF CACHE INTERNAL "") - set(INSTALL_GMOCK OFF CACHE INTERNAL "") - add_subdirectory(${CMAKE_SOURCE_DIR}/googletest) - set(GTEST_BOTH_LIBRARIES gtest gmock gmock_main) - foreach(HEADER test mock) - # CMake 2.8 and older don't respect INTERFACE_INCLUDE_DIRECTORIES, so we - # have to add the paths ourselves. - set(HFILE g${HEADER}/g${HEADER}.h) - set(HPATH ${GTEST_ROOT}/google${HEADER}/include) - find_path(HEADER_PATH_${HEADER} ${HFILE} - NO_DEFAULT_PATHS - HINTS ${HPATH} - ) - if (NOT HEADER_PATH_${HEADER}) - message(FATAL_ERROR "Failed to find header ${HFILE} in ${HPATH}") - endif() - list(APPEND GTEST_INCLUDE_DIRS ${HEADER_PATH_${HEADER}}) - endforeach() - elseif(BENCHMARK_DOWNLOAD_DEPENDENCIES) - build_external_gtest() - else() - find_package(GTest REQUIRED) - find_path(GMOCK_INCLUDE_DIRS gmock/gmock.h - HINTS ${GTEST_INCLUDE_DIRS}) - if (NOT GMOCK_INCLUDE_DIRS) - message(FATAL_ERROR "Failed to find header gmock/gmock.h with hint ${GTEST_INCLUDE_DIRS}") - endif() - set(GTEST_INCLUDE_DIRS ${GTEST_INCLUDE_DIRS} ${GMOCK_INCLUDE_DIRS}) - # FIXME: We don't currently require the gmock library to build the tests, - # and it's likely we won't find it, so we don't try. As long as we've - # found the gmock/gmock.h header and gtest_main that should be good enough. 
-  endif()
-endif()
diff --git a/benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMAr.cmake b/benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMAr.cmake
deleted file mode 100755
index 23469813cf..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMAr.cmake
+++ /dev/null
@@ -1,16 +0,0 @@
-include(FeatureSummary)
-
-find_program(LLVMAR_EXECUTABLE
-  NAMES llvm-ar
-  DOC "The llvm-ar executable"
-  )
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(LLVMAr
-  DEFAULT_MSG
-  LLVMAR_EXECUTABLE)
-
-SET_PACKAGE_PROPERTIES(LLVMAr PROPERTIES
-  URL https://llvm.org/docs/CommandGuide/llvm-ar.html
-  DESCRIPTION "create, modify, and extract from archives"
-)
diff --git a/benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMNm.cmake b/benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMNm.cmake
deleted file mode 100755
index e56430a04f..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMNm.cmake
+++ /dev/null
@@ -1,16 +0,0 @@
-include(FeatureSummary)
-
-find_program(LLVMNM_EXECUTABLE
-  NAMES llvm-nm
-  DOC "The llvm-nm executable"
-  )
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(LLVMNm
-  DEFAULT_MSG
-  LLVMNM_EXECUTABLE)
-
-SET_PACKAGE_PROPERTIES(LLVMNm PROPERTIES
-  URL https://llvm.org/docs/CommandGuide/llvm-nm.html
-  DESCRIPTION "list LLVM bitcode and object file’s symbol table"
-)
diff --git a/benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMRanLib.cmake b/benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMRanLib.cmake
deleted file mode 100755
index 7b53e1a790..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMRanLib.cmake
+++ /dev/null
@@ -1,15 +0,0 @@
-include(FeatureSummary)
-
-find_program(LLVMRANLIB_EXECUTABLE
-  NAMES llvm-ranlib
-  DOC "The llvm-ranlib executable"
-  )
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(LLVMRanLib
-  DEFAULT_MSG
-  LLVMRANLIB_EXECUTABLE)
-
-SET_PACKAGE_PROPERTIES(LLVMRanLib PROPERTIES
-  DESCRIPTION "generate index for LLVM archive"
-)
diff --git a/benchmarks/thirdparty/benchmark/cmake/benchmark.pc.in b/benchmarks/thirdparty/benchmark/cmake/benchmark.pc.in
deleted file mode 100755
index 1e84bff68d..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/benchmark.pc.in
+++ /dev/null
@@ -1,11 +0,0 @@
-prefix=@CMAKE_INSTALL_PREFIX@
-exec_prefix=${prefix}
-libdir=${prefix}/lib
-includedir=${prefix}/include
-
-Name: @PROJECT_NAME@
-Description: Google microbenchmark framework
-Version: @VERSION@
-
-Libs: -L${libdir} -lbenchmark
-Cflags: -I${includedir}
diff --git a/benchmarks/thirdparty/benchmark/cmake/gnu_posix_regex.cpp b/benchmarks/thirdparty/benchmark/cmake/gnu_posix_regex.cpp
deleted file mode 100755
index b5b91cdab7..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/gnu_posix_regex.cpp
+++ /dev/null
@@ -1,12 +0,0 @@
-#include <regex.h>
-#include <string>
-int main() {
-  std::string str = "test0159";
-  regex_t re;
-  int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
-  if (ec != 0) {
-    return ec;
-  }
-  return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
-}
-
diff --git a/benchmarks/thirdparty/benchmark/cmake/llvm-toolchain.cmake b/benchmarks/thirdparty/benchmark/cmake/llvm-toolchain.cmake
deleted file mode 100755
index fc119e52fd..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/llvm-toolchain.cmake
+++ /dev/null
@@ -1,8 +0,0 @@
-find_package(LLVMAr REQUIRED)
-set(CMAKE_AR "${LLVMAR_EXECUTABLE}" CACHE FILEPATH "" FORCE)
-
-find_package(LLVMNm REQUIRED)
-set(CMAKE_NM "${LLVMNM_EXECUTABLE}" CACHE FILEPATH "" FORCE)
-
-find_package(LLVMRanLib REQUIRED)
-set(CMAKE_RANLIB "${LLVMRANLIB_EXECUTABLE}" CACHE FILEPATH "" FORCE)
diff --git a/benchmarks/thirdparty/benchmark/cmake/posix_regex.cpp b/benchmarks/thirdparty/benchmark/cmake/posix_regex.cpp
deleted file mode 100755
index 466dc62560..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/posix_regex.cpp
+++ /dev/null
@@ -1,14 +0,0 @@
-#include <regex.h>
-#include <string>
-int main() {
-  std::string str = "test0159";
-  regex_t re;
-  int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
-  if (ec != 0) {
-    return ec;
-  }
-  int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
-  regfree(&re);
-  return ret;
-}
-
diff --git a/benchmarks/thirdparty/benchmark/cmake/split_list.cmake b/benchmarks/thirdparty/benchmark/cmake/split_list.cmake
deleted file mode 100755
index 67aed3fdc8..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/split_list.cmake
+++ /dev/null
@@ -1,3 +0,0 @@
-macro(split_list listname)
-  string(REPLACE ";" " " ${listname} "${${listname}}")
-endmacro()
diff --git a/benchmarks/thirdparty/benchmark/cmake/std_regex.cpp b/benchmarks/thirdparty/benchmark/cmake/std_regex.cpp
deleted file mode 100755
index 696f2a26bc..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/std_regex.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-#include <regex>
-#include <string>
-int main() {
-  const std::string str = "test0159";
-  std::regex re;
-  re = std::regex("^[a-z]+[0-9]+$",
-                  std::regex_constants::extended | std::regex_constants::nosubs);
-  return std::regex_search(str, re) ? 0 : -1;
-}
-
diff --git a/benchmarks/thirdparty/benchmark/cmake/steady_clock.cpp b/benchmarks/thirdparty/benchmark/cmake/steady_clock.cpp
deleted file mode 100755
index 66d50d17e9..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/steady_clock.cpp
+++ /dev/null
@@ -1,7 +0,0 @@
-#include <chrono>
-
-int main() {
-  typedef std::chrono::steady_clock Clock;
-  Clock::time_point tp = Clock::now();
-  ((void)tp);
-}
diff --git a/benchmarks/thirdparty/benchmark/cmake/thread_safety_attributes.cpp b/benchmarks/thirdparty/benchmark/cmake/thread_safety_attributes.cpp
deleted file mode 100755
index 46161babdb..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/thread_safety_attributes.cpp
+++ /dev/null
@@ -1,4 +0,0 @@
-#define HAVE_THREAD_SAFETY_ATTRIBUTES
-#include "../src/mutex.h"
-
-int main() {}
diff --git a/benchmarks/thirdparty/benchmark/docs/AssemblyTests.md b/benchmarks/thirdparty/benchmark/docs/AssemblyTests.md
deleted file mode 100755
index 1fbdc269b5..0000000000
--- a/benchmarks/thirdparty/benchmark/docs/AssemblyTests.md
+++ /dev/null
@@ -1,147 +0,0 @@
-# Assembly Tests
-
-The Benchmark library provides a number of functions whose primary
-purpose is to affect assembly generation, including `DoNotOptimize`
-and `ClobberMemory`. In addition there are other functions,
-such as `KeepRunning`, for which generating good assembly is paramount.
-
-For these functions it's important to have tests that verify the
-correctness and quality of the implementation. This requires testing
-the code generated by the compiler.
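-
-As a rough illustration (not one of the library's own tests), the kind of
-code under scrutiny looks like the hypothetical benchmark below;
-`DoNotOptimize` must force the computation to survive optimization so that
-there is assembly left to check:
-
-```c++
-static void BM_Example(benchmark::State& state) {
-  int x = 0;
-  for (auto _ : state) {
-    x += 1;
-    // Without this call the compiler could delete the loop body entirely.
-    benchmark::DoNotOptimize(x);
-  }
-}
-BENCHMARK(BM_Example);
-```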
-
-This document describes how the Benchmark library tests compiler output,
-as well as how to properly write new tests.
-
-
-## Anatomy of a Test
-
-Writing a test has two steps:
-
-* Write the code you want to generate assembly for.
-* Add `// CHECK` lines to match against the verified assembly.
-
-Example:
-```c++
-
-// CHECK-LABEL: test_add:
-extern "C" int test_add() {
-    extern int ExternInt;
-    return ExternInt + 1;
-
-    // CHECK: movl ExternInt(%rip), %eax
-    // CHECK: addl %eax
-    // CHECK: ret
-}
-
-```
-
-#### LLVM Filecheck
-
-[LLVM's Filecheck](https://llvm.org/docs/CommandGuide/FileCheck.html)
-is used to test the generated assembly against the `// CHECK` lines
-specified in the test's source file. Please see the documentation
-linked above for information on how to write `CHECK` directives.
-
-#### Tips and Tricks:
-
-* Tests should match the minimal amount of output required to establish
-correctness. `CHECK` directives don't have to match on the exact next line
-after the previous match, so tests should omit checks for unimportant
-bits of assembly. ([`CHECK-NEXT`](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-next-directive)
-can be used to ensure a match occurs exactly after the previous match).
-
-* The tests are compiled with `-O3 -g0`. So we're only testing the
-optimized output.
-
-* The assembly output is further cleaned up using `tools/strip_asm.py`.
-This removes comments, assembler directives, and unused labels before
-the test is run.
-
-* The generated and stripped assembly file for a test is output under
-`<build/dir>/test/<test-name>.s`
-
-* Filecheck supports using [`CHECK` prefixes](https://llvm.org/docs/CommandGuide/FileCheck.html#cmdoption-check-prefixes)
-to specify lines that should only match in certain situations.
-The Benchmark tests use `CHECK-CLANG` and `CHECK-GNU` for lines that
-are only expected to match Clang or GCC's output respectively. Normal
-`CHECK` lines match against all compilers. (Note: `CHECK-NOT` and
-`CHECK-LABEL` are NOT prefixes. They are versions of non-prefixed
-`CHECK` lines)
-
-* Use `extern "C"` to disable name mangling for specific functions. This
-makes them easier to name in the `CHECK` lines.
-
-
-## Problems Writing Portable Tests
-
-Writing tests which check the code generated by a compiler is
-inherently non-portable. Different compilers and even different compiler
-versions may generate entirely different code. The Benchmark tests
-must tolerate this.
-
-LLVM Filecheck provides a number of mechanisms to help write
-"more portable" tests; including [matching using regular expressions](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-pattern-matching-syntax),
-allowing the creation of [named variables](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-variables)
-for later matching, and [checking non-sequential matches](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-dag-directive).
-
-#### Capturing Variables
-
-For example, say GCC stores a variable in a register but Clang stores
-it in memory. To write a test that tolerates both cases we "capture"
-the destination of the store, and then use the captured expression
-to write the remainder of the test.
-
-```c++
-// CHECK-LABEL: test_div_no_op_into_shr:
-extern "C" int test_div_no_op_into_shr(int value) {
-    int divisor = 2;
-    benchmark::DoNotOptimize(divisor); // hide the value from the optimizer
-    return value / divisor;
-
-    // CHECK: movl $2, [[DEST:.*]]
-    // CHECK: idivl [[DEST]]
-    // CHECK: ret
-}
-```
-
-#### Using Regular Expressions to Match Differing Output
-
-Often tests require testing assembly lines which may subtly differ
-between compilers or compiler versions. A common example of this
-is matching stack frame addresses. In this case regular expressions
-can be used to match the differing bits of output. For example:
-
-```c++
-int ExternInt;
-struct Point { int x, y, z; };
-
-// CHECK-LABEL: test_store_point:
-extern "C" void test_store_point() {
-    Point p{ExternInt, ExternInt, ExternInt};
-    benchmark::DoNotOptimize(p);
-
-    // CHECK: movl ExternInt(%rip), %eax
-    // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
-    // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
-    // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
-    // CHECK: ret
-}
-```
-
-## Current Requirements and Limitations
-
-The tests require Filecheck to be installed along the `PATH` of the
-build machine. Otherwise the tests will be disabled.
-
-Additionally, as mentioned in the previous section, codegen tests are
-inherently non-portable. Currently the tests are limited to:
-
-* x86_64 targets.
-* Compiled with GCC or Clang
-
-Further work could be done, at least on a limited basis, to extend the
-tests to other architectures and compilers (using `CHECK` prefixes).
-
-Furthermore, the tests fail for builds which specify additional flags
-that modify code generation, including `--coverage` or `-fsanitize=`.
-
diff --git a/benchmarks/thirdparty/benchmark/docs/tools.md b/benchmarks/thirdparty/benchmark/docs/tools.md
deleted file mode 100755
index 70500bd322..0000000000
--- a/benchmarks/thirdparty/benchmark/docs/tools.md
+++ /dev/null
@@ -1,242 +0,0 @@
-# Benchmark Tools
-
-## compare_bench.py
-
-The `compare_bench.py` utility can be used to compare the results of benchmarks.
-The program is invoked like:
-
-``` bash
-$ compare_bench.py <old-benchmark> <new-benchmark> [benchmark options]...
-```
-
-Where `<old-benchmark>` and `<new-benchmark>` either specify a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
-
-`[benchmark options]` will be passed to the benchmark invocations. They can be anything that the binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes.
-
-The sample output using the JSON test files under `Inputs/` gives:
-
-``` bash
-$ ./compare_bench.py ./gbench/Inputs/test1_run1.json ./gbench/Inputs/test1_run2.json
-Comparing ./gbench/Inputs/test1_run1.json to ./gbench/Inputs/test1_run2.json
-Benchmark                   Time      CPU      Time Old   Time New   CPU Old   CPU New
--------------------------------------------------------------------------------------------------------------
-BM_SameTimes               +0.0000   +0.0000        10         10        10        10
-BM_2xFaster                -0.5000   -0.5000        50         25        50        25
-BM_2xSlower                +1.0000   +1.0000        50        100        50       100
-BM_1PercentFaster          -0.0100   -0.0100       100         99       100        99
-BM_1PercentSlower          +0.0100   +0.0100       100        101       100       101
-BM_10PercentFaster         -0.1000   -0.1000       100         90       100        90
-BM_10PercentSlower         +0.1000   +0.1000       100        110       100       110
-BM_100xSlower             +99.0000  +99.0000       100      10000       100     10000
-BM_100xFaster              -0.9900   -0.9900     10000        100     10000       100
-BM_10PercentCPUToTime      +0.1000   -0.1000       100        110       100        90
-BM_ThirdFaster             -0.3333   -0.3334       100         67       100        67
-BM_BadTimeUnit             -0.9000   +0.2000         0          0         0         1
-```
-
-As you can see, the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
-
-When a benchmark executable is run, the raw output from the benchmark is printed in real time to stdout. The sample output using `benchmark/basic_test` for both arguments looks like:
-
-```
-./compare_bench.py test/basic_test test/basic_test --benchmark_filter=BM_empty.*
-RUNNING: test/basic_test --benchmark_filter=BM_empty.* --benchmark_out=/tmp/tmpN7LF3a
-Run on (8 X 4000 MHz CPU s)
-2017-11-07 23:28:36
----------------------------------------------------------------------
-Benchmark                              Time           CPU Iterations
----------------------------------------------------------------------
-BM_empty                               4 ns          4 ns  170178757
-BM_empty/threads:8                     1 ns          7 ns  103868920
-BM_empty_stop_start                    0 ns          0 ns 1000000000
-BM_empty_stop_start/threads:8          0 ns          0 ns 1403031720
-RUNNING: /test/basic_test --benchmark_filter=BM_empty.* --benchmark_out=/tmp/tmplvrIp8
-Run on (8 X 4000 MHz CPU s)
-2017-11-07 23:28:38
----------------------------------------------------------------------
-Benchmark                              Time           CPU Iterations
----------------------------------------------------------------------
-BM_empty                               4 ns          4 ns  169534855
-BM_empty/threads:8                     1 ns          7 ns  104188776
-BM_empty_stop_start                    0 ns          0 ns 1000000000
-BM_empty_stop_start/threads:8          0 ns          0 ns 1404159424
-Comparing ../build/test/basic_test to ../build/test/basic_test
-Benchmark                              Time      CPU     Time Old  Time New  CPU Old  CPU New
----------------------------------------------------------------------------------------------------------------------
-BM_empty                              -0.0048   -0.0049         4         4        4        4
-BM_empty/threads:8                    -0.0123   -0.0054         1         1        7        7
-BM_empty_stop_start                   -0.0000   -0.0000         0         0        0        0
-BM_empty_stop_start/threads:8         -0.0029   +0.0001         0         0        0        0
-
-```
-
-As you can see, the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
-Obviously this example doesn't give any useful output, but it's intended to show the output format when `compare_bench.py` needs to run benchmarks.
-
-## compare.py
-
-The `compare.py` script can be used to compare the results of benchmarks.
-There are three modes of operation:
-
-1. Compare two benchmarks, which is what `compare_bench.py` did.
-The program is invoked like:
-
-``` bash
-$ compare.py benchmarks <benchmark_baseline> <benchmark_contender> [benchmark options]...
-```
-Where `<benchmark_baseline>` and `<benchmark_contender>` either specify a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
-
-`[benchmark options]` will be passed to the benchmark invocations. They can be anything that the binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes.
-
-Example output:
-```
-$ ./compare.py benchmarks ./a.out ./a.out
-RUNNING: ./a.out --benchmark_out=/tmp/tmprBT5nW
-Run on (8 X 4000 MHz CPU s)
-2017-11-07 21:16:44
-------------------------------------------------------
-Benchmark               Time           CPU Iterations
-------------------------------------------------------
-BM_memcpy/8            36 ns         36 ns   19101577   211.669MB/s
-BM_memcpy/64           76 ns         76 ns    9412571   800.199MB/s
-BM_memcpy/512          84 ns         84 ns    8249070   5.64771GB/s
-BM_memcpy/1024        116 ns        116 ns    6181763   8.19505GB/s
-BM_memcpy/8192        643 ns        643 ns    1062855   11.8636GB/s
-BM_copy/8             222 ns        222 ns    3137987   34.3772MB/s
-BM_copy/64           1608 ns       1608 ns     432758   37.9501MB/s
-BM_copy/512         12589 ns      12589 ns      54806   38.7867MB/s
-BM_copy/1024        25169 ns      25169 ns      27713   38.8003MB/s
-BM_copy/8192       201165 ns     201112 ns       3486   38.8466MB/s
-RUNNING: ./a.out --benchmark_out=/tmp/tmpt1wwG_
-Run on (8 X 4000 MHz CPU s)
-2017-11-07 21:16:53
-------------------------------------------------------
-Benchmark               Time           CPU Iterations
-------------------------------------------------------
-BM_memcpy/8            36 ns         36 ns   19397903   211.255MB/s
-BM_memcpy/64           73 ns         73 ns    9691174   839.635MB/s
-BM_memcpy/512          85 ns         85 ns    8312329   5.60101GB/s
-BM_memcpy/1024        118 ns        118 ns    6438774   8.11608GB/s
-BM_memcpy/8192        656 ns        656 ns    1068644   11.6277GB/s
-BM_copy/8             223 ns        223 ns    3146977   34.2338MB/s
-BM_copy/64           1611 ns       1611 ns     435340   37.8751MB/s
-BM_copy/512         12622 ns      12622 ns      54818   38.6844MB/s
-BM_copy/1024        25257 ns      25239 ns      27779   38.6927MB/s
-BM_copy/8192       205013 ns     205010 ns       3479    38.108MB/s
-Comparing ./a.out to ./a.out
-Benchmark             Time      CPU     Time Old  Time New  CPU Old  CPU New
-------------------------------------------------------------------------------------------------------
-BM_memcpy/8          +0.0020   +0.0020        36        36       36       36
-BM_memcpy/64         -0.0468   -0.0470        76        73       76       73
-BM_memcpy/512        +0.0081   +0.0083        84        85       84       85
-BM_memcpy/1024       +0.0098   +0.0097       116       118      116      118
-BM_memcpy/8192       +0.0200   +0.0203       643       656      643      656
-BM_copy/8            +0.0046   +0.0042       222       223      222      223
-BM_copy/64           +0.0020   +0.0020      1608      1611     1608     1611
-BM_copy/512          +0.0027   +0.0026     12589     12622    12589    12622
-BM_copy/1024         +0.0035   +0.0028     25169     25257    25169    25239
-BM_copy/8192         +0.0191   +0.0194    201165    205013   201112   205010
-```
-
-What it does is, for every benchmark from the first run, it looks for the benchmark with exactly the same name in the second run, and then compares the results. If the names differ, the benchmark is omitted from the diff.
-As you can see, the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
-
-2. Compare two different filters of one benchmark
-The program is invoked like:
-
-``` bash
-$ compare.py filters <benchmark> <filter_baseline> <filter_contender> [benchmark options]...
-```
-Where `<benchmark>` either specifies a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
-
-Where `<filter_baseline>` and `<filter_contender>` are the same regex filters that you would pass to the `[--benchmark_filter=<regex>]` parameter of the benchmark binary.
-
-`[benchmark options]` will be passed to the benchmark invocations. They can be anything that the binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes.
-
-Example output:
-```
-$ ./compare.py filters ./a.out BM_memcpy BM_copy
-RUNNING: ./a.out --benchmark_filter=BM_memcpy --benchmark_out=/tmp/tmpBWKk0k
-Run on (8 X 4000 MHz CPU s)
-2017-11-07 21:37:28
-------------------------------------------------------
-Benchmark               Time           CPU Iterations
-------------------------------------------------------
-BM_memcpy/8            36 ns         36 ns   17891491   211.215MB/s
-BM_memcpy/64           74 ns         74 ns    9400999   825.646MB/s
-BM_memcpy/512          87 ns         87 ns    8027453   5.46126GB/s
-BM_memcpy/1024        111 ns        111 ns    6116853    8.5648GB/s
-BM_memcpy/8192        657 ns        656 ns    1064679   11.6247GB/s
-RUNNING: ./a.out --benchmark_filter=BM_copy --benchmark_out=/tmp/tmpAvWcOM
-Run on (8 X 4000 MHz CPU s)
-2017-11-07 21:37:33
-----------------------------------------------------
-Benchmark             Time           CPU Iterations
-----------------------------------------------------
-BM_copy/8            227 ns        227 ns    3038700   33.6264MB/s
-BM_copy/64          1640 ns       1640 ns     426893   37.2154MB/s
-BM_copy/512        12804 ns      12801 ns      55417   38.1444MB/s
-BM_copy/1024       25409 ns      25407 ns      27516   38.4365MB/s
-BM_copy/8192      202986 ns     202990 ns       3454   38.4871MB/s
-Comparing BM_memcpy to BM_copy (from ./a.out)
-Benchmark                          Time      CPU       Time Old  Time New  CPU Old  CPU New
---------------------------------------------------------------------------------------------------------------------
-[BM_memcpy vs. BM_copy]/8         +5.2829   +5.2812         36       227       36      227
-[BM_memcpy vs. BM_copy]/64       +21.1719  +21.1856         74      1640       74     1640
-[BM_memcpy vs. BM_copy]/512     +145.6487 +145.6097         87     12804       87    12801
-[BM_memcpy vs. BM_copy]/1024    +227.1860 +227.1776        111     25409      111    25407
-[BM_memcpy vs. BM_copy]/8192    +308.1664 +308.2898        657    202986      656   202990
-```
-
-As you can see, it applies the filter to the benchmarks, both when running the benchmark, and before doing the diff. And to make the diff work, the matches are replaced with some common string. Thus, you can compare two different benchmark families within one benchmark binary.
-As you can see, the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
-
-3. Compare filter one from benchmark one to filter two from benchmark two:
-The program is invoked like:
-
-``` bash
-$ compare.py benchmarksfiltered <benchmark_baseline> <filter_baseline> <benchmark_contender> <filter_contender> [benchmark options]...
-```
-
-Where `<benchmark_baseline>` and `<benchmark_contender>` either specify a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
-
-Where `<filter_baseline>` and `<filter_contender>` are the same regex filters that you would pass to the `[--benchmark_filter=<regex>]` parameter of the benchmark binary.
-
-`[benchmark options]` will be passed to the benchmark invocations. They can be anything that the binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes.
- -Example output: -``` -$ ./compare.py benchmarksfiltered ./a.out BM_memcpy ./a.out BM_copy -RUNNING: ./a.out --benchmark_filter=BM_memcpy --benchmark_out=/tmp/tmp_FvbYg -Run on (8 X 4000 MHz CPU s) -2017-11-07 21:38:27 ------------------------------------------------------- -Benchmark Time CPU Iterations ------------------------------------------------------- -BM_memcpy/8 37 ns 37 ns 18953482 204.118MB/s -BM_memcpy/64 74 ns 74 ns 9206578 828.245MB/s -BM_memcpy/512 91 ns 91 ns 8086195 5.25476GB/s -BM_memcpy/1024 120 ns 120 ns 5804513 7.95662GB/s -BM_memcpy/8192 664 ns 664 ns 1028363 11.4948GB/s -RUNNING: ./a.out --benchmark_filter=BM_copy --benchmark_out=/tmp/tmpDfL5iE -Run on (8 X 4000 MHz CPU s) -2017-11-07 21:38:32 ----------------------------------------------------- -Benchmark Time CPU Iterations ----------------------------------------------------- -BM_copy/8 230 ns 230 ns 2985909 33.1161MB/s -BM_copy/64 1654 ns 1653 ns 419408 36.9137MB/s -BM_copy/512 13122 ns 13120 ns 53403 37.2156MB/s -BM_copy/1024 26679 ns 26666 ns 26575 36.6218MB/s -BM_copy/8192 215068 ns 215053 ns 3221 36.3283MB/s -Comparing BM_memcpy (from ./a.out) to BM_copy (from ./a.out) -Benchmark Time CPU Time Old Time New CPU Old CPU New --------------------------------------------------------------------------------------------------------------------- -[BM_memcpy vs. BM_copy]/8 +5.1649 +5.1637 37 230 37 230 -[BM_memcpy vs. BM_copy]/64 +21.4352 +21.4374 74 1654 74 1653 -[BM_memcpy vs. BM_copy]/512 +143.6022 +143.5865 91 13122 91 13120 -[BM_memcpy vs. BM_copy]/1024 +221.5903 +221.4790 120 26679 120 26666 -[BM_memcpy vs. BM_copy]/8192 +322.9059 +323.0096 664 215068 664 215053 -``` -This is a mix of the previous two modes, two (potentially different) benchmark binaries are run, and a different filter is applied to each one. -As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`. diff --git a/benchmarks/thirdparty/benchmark/include/benchmark/benchmark.h b/benchmarks/thirdparty/benchmark/include/benchmark/benchmark.h deleted file mode 100755 index 23dd3d09b1..0000000000 --- a/benchmarks/thirdparty/benchmark/include/benchmark/benchmark.h +++ /dev/null @@ -1,1456 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Support for registering benchmarks for functions. - -/* Example usage: -// Define a function that executes the code to be measured a -// specified number of times: -static void BM_StringCreation(benchmark::State& state) { - for (auto _ : state) - std::string empty_string; -} - -// Register the function as a benchmark -BENCHMARK(BM_StringCreation); - -// Define another benchmark -static void BM_StringCopy(benchmark::State& state) { - std::string x = "hello"; - for (auto _ : state) - std::string copy(x); -} -BENCHMARK(BM_StringCopy); - -// Augment the main() program to invoke benchmarks if specified -// via the --benchmarks command line flag. 
E.g.,
-//       my_unittest --benchmark_filter=all
-//       my_unittest --benchmark_filter=BM_StringCreation
-//       my_unittest --benchmark_filter=String
-//       my_unittest --benchmark_filter='Copy|Creation'
-int main(int argc, char** argv) {
-  benchmark::Initialize(&argc, argv);
-  benchmark::RunSpecifiedBenchmarks();
-  return 0;
-}
-
-// Sometimes a family of microbenchmarks can be implemented with
-// just one routine that takes an extra argument to specify which
-// one of the family of benchmarks to run. For example, the following
-// code defines a family of microbenchmarks for measuring the speed
-// of memcpy() calls of different lengths:
-
-static void BM_memcpy(benchmark::State& state) {
-  char* src = new char[state.range(0)]; char* dst = new char[state.range(0)];
-  memset(src, 'x', state.range(0));
-  for (auto _ : state)
-    memcpy(dst, src, state.range(0));
-  state.SetBytesProcessed(int64_t(state.iterations()) *
-                          int64_t(state.range(0)));
-  delete[] src; delete[] dst;
-}
-BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
-
-// The preceding code is quite repetitive, and can be replaced with the
-// following short-hand. The following invocation will pick a few
-// appropriate arguments in the specified range and will generate a
-// microbenchmark for each such argument.
-BENCHMARK(BM_memcpy)->Range(8, 8<<10);
-
-// You might have a microbenchmark that depends on two inputs. For
-// example, the following code defines a family of microbenchmarks for
-// measuring the speed of set insertion.
-static void BM_SetInsert(benchmark::State& state) {
-  std::set<int> data;
-  for (auto _ : state) {
-    state.PauseTiming();
-    data = ConstructRandomSet(state.range(0));
-    state.ResumeTiming();
-    for (int j = 0; j < state.range(1); ++j)
-      data.insert(RandomNumber());
-  }
-}
-BENCHMARK(BM_SetInsert)
-    ->Args({1<<10, 128})
-    ->Args({2<<10, 128})
-    ->Args({4<<10, 128})
-    ->Args({8<<10, 128})
-    ->Args({1<<10, 512})
-    ->Args({2<<10, 512})
-    ->Args({4<<10, 512})
-    ->Args({8<<10, 512});
-
-// The preceding code is quite repetitive, and can be replaced with
-// the following short-hand. The following macro will pick a few
-// appropriate arguments in the product of the two specified ranges
-// and will generate a microbenchmark for each such pair.
-BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
-
-// For more complex patterns of inputs, passing a custom function
-// to Apply allows programmatic specification of an
-// arbitrary set of arguments to run the microbenchmark on.
-// The following example enumerates a dense range on
-// one parameter, and a sparse range on the second.
-static void CustomArguments(benchmark::internal::Benchmark* b) {
-  for (int i = 0; i <= 10; ++i)
-    for (int j = 32; j <= 1024*1024; j *= 8)
-      b->Args({i, j});
-}
-BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
-
-// Templated microbenchmarks work the same way:
-// Produce then consume 'size' messages 'iters' times
-// Measures throughput in the absence of multiprogramming.
-template <class Q> int BM_Sequential(benchmark::State& state) {
-  Q q;
-  typename Q::value_type v;
-  for (auto _ : state) {
-    for (int i = state.range(0); i--; )
-      q.push(v);
-    for (int e = state.range(0); e--; )
-      q.Wait(&v);
-  }
-  // actually messages, not bytes:
-  state.SetBytesProcessed(
-      static_cast<int64_t>(state.iterations())*state.range(0));
-}
-BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
-
-Use `Benchmark::MinTime(double t)` to set the minimum time used to run the
-benchmark. This option overrides the `benchmark_min_time` flag.
-
-void BM_test(benchmark::State& state) {
-  ... body ...
-}
-BENCHMARK(BM_test)->MinTime(2.0); // Run for at least 2 seconds.
-
-In a multithreaded test, it is guaranteed that none of the threads will start
-until all have reached the loop start, and all will have finished before any
-thread exits the loop body. As such, any global setup or teardown you want to
-do can be wrapped in a check against the thread index:
-
-static void BM_MultiThreaded(benchmark::State& state) {
-  if (state.thread_index == 0) {
-    // Setup code here.
-  }
-  for (auto _ : state) {
-    // Run the test as normal.
-  }
-  if (state.thread_index == 0) {
-    // Teardown code here.
-  }
-}
-BENCHMARK(BM_MultiThreaded)->Threads(4);
-
-
-If a benchmark runs for a few milliseconds it may be hard to visually compare
-the measured times, since the output data is given in nanoseconds by default.
-To set the time unit manually, you can specify it:
-
-BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
-*/
-
-#ifndef BENCHMARK_BENCHMARK_H_
-#define BENCHMARK_BENCHMARK_H_
-
-
-// The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer.
-#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L)
-#define BENCHMARK_HAS_CXX11
-#endif
-
-#include <stdint.h>
-
-#include <algorithm>
-#include <cassert>
-#include <cstddef>
-#include <iosfwd>
-#include <map>
-#include <set>
-#include <string>
-#include <vector>
-
-#if defined(BENCHMARK_HAS_CXX11)
-#include <initializer_list>
-#include <type_traits>
-#include <utility>
-#endif
-
-#if defined(_MSC_VER)
-#include <intrin.h> // for _ReadWriteBarrier
-#endif
-
-#ifndef BENCHMARK_HAS_CXX11
-#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \
-  TypeName(const TypeName&);                         \
-  TypeName& operator=(const TypeName&)
-#else
-#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \
-  TypeName(const TypeName&) = delete;                \
-  TypeName& operator=(const TypeName&) = delete
-#endif
-
-#if defined(__GNUC__)
-#define BENCHMARK_UNUSED __attribute__((unused))
-#define BENCHMARK_ALWAYS_INLINE __attribute__((always_inline))
-#define BENCHMARK_NOEXCEPT noexcept
-#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x)
-#elif defined(_MSC_VER) && !defined(__clang__)
-#define BENCHMARK_UNUSED
-#define BENCHMARK_ALWAYS_INLINE __forceinline
-#if _MSC_VER >= 1900
-#define BENCHMARK_NOEXCEPT noexcept
-#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x)
-#else
-#define BENCHMARK_NOEXCEPT
-#define BENCHMARK_NOEXCEPT_OP(x)
-#endif
-#define __func__ __FUNCTION__
-#else
-#define BENCHMARK_UNUSED
-#define BENCHMARK_ALWAYS_INLINE
-#define BENCHMARK_NOEXCEPT
-#define BENCHMARK_NOEXCEPT_OP(x)
-#endif
-
-#define BENCHMARK_INTERNAL_TOSTRING2(x) #x
-#define BENCHMARK_INTERNAL_TOSTRING(x) BENCHMARK_INTERNAL_TOSTRING2(x)
-
-#if defined(__GNUC__)
-#define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y)
-#define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg)))
-#else
-#define BENCHMARK_BUILTIN_EXPECT(x, y) x
-#define BENCHMARK_DEPRECATED_MSG(msg)
-#define BENCHMARK_WARNING_MSG(msg) __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING(__LINE__) ") : warning note: " msg))
-#endif
-
-#if defined(__GNUC__) && !defined(__clang__)
-#define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
-#endif
-
-namespace benchmark {
-class BenchmarkReporter;
-
-void Initialize(int* argc, char** argv);
-
-// Report to stdout all arguments in 'argv' as unrecognized except the first.
-// Returns true if there is at least one unrecognized argument (i.e. 'argc' > 1).
-bool ReportUnrecognizedArguments(int argc, char** argv);
-
-// Generate a list of benchmarks matching the specified --benchmark_filter flag
-// and if --benchmark_list_tests is specified return after printing the name
-// of each matching benchmark. Otherwise run each matching benchmark and
-// report the results.
-//
-// The second and third overload use the specified 'console_reporter' and
-// 'file_reporter' respectively. 'file_reporter' will write to the file
-// specified
-// by '--benchmark_output'. If '--benchmark_output' is not given the
-// 'file_reporter' is ignored.
-//
-// RETURNS: The number of matching benchmarks.
-size_t RunSpecifiedBenchmarks();
-size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter);
-size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
-                              BenchmarkReporter* file_reporter);
-
-// If this routine is called, peak memory allocation past this point in the
-// benchmark is reported at the end of the benchmark report line. (It is
-// computed by running the benchmark once with a single iteration and a memory
-// tracer.)
-// TODO(dominic)
-// void MemoryUsage();
-
-namespace internal {
-class Benchmark;
-class BenchmarkImp;
-class BenchmarkFamilies;
-
-void UseCharPointer(char const volatile*);
-
-// Take ownership of the pointer and register the benchmark. Return the
-// registered benchmark.
-Benchmark* RegisterBenchmarkInternal(Benchmark*);
-
-// Ensure that the standard streams are properly initialized in every TU.
-int InitializeStreams();
-BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams();
-
-} // namespace internal
-
-
-#if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \
-    defined(__EMSCRIPTEN__)
-# define BENCHMARK_HAS_NO_INLINE_ASSEMBLY
-#endif
-
-
-// The DoNotOptimize(...) function can be used to prevent a value or
-// expression from being optimized away by the compiler. This function is
-// intended to add little to no overhead.
-// See: https://youtu.be/nXaxk27zwlk?t=2441
-#ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY
-template <class Tp>
-inline BENCHMARK_ALWAYS_INLINE
-void DoNotOptimize(Tp const& value) {
-  asm volatile("" : : "r,m"(value) : "memory");
-}
-
-template <class Tp>
-inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) {
-#if defined(__clang__)
-  asm volatile("" : "+r,m"(value) : : "memory");
-#else
-  asm volatile("" : "+m,r"(value) : : "memory");
-#endif
-}
-
-// Force the compiler to flush pending writes to global memory. Acts as an
-// effective read/write barrier.
-inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
-  asm volatile("" : : : "memory");
-}
-#elif defined(_MSC_VER)
-template <class Tp>
-inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
-  internal::UseCharPointer(&reinterpret_cast<char const volatile&>(value));
-  _ReadWriteBarrier();
-}
-
-inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
-  _ReadWriteBarrier();
-}
-#else
-template <class Tp>
-inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
-  internal::UseCharPointer(&reinterpret_cast<char const volatile&>(value));
-}
-// FIXME Add ClobberMemory() for non-gnu and non-msvc compilers
-#endif
-
-
-
-// This class is used for user-defined counters.
-class Counter {
-public:
-
-  enum Flags {
-    kDefaults   = 0,
-    // Mark the counter as a rate. It will be presented divided
-    // by the duration of the benchmark.
-    kIsRate     = 1,
-    // Mark the counter as a thread-average quantity. It will be
-    // presented divided by the number of threads.
-    kAvgThreads = 2,
-    // Mark the counter as a thread-average rate. See above.
-    kAvgThreadsRate = kIsRate|kAvgThreads
-  };
-
-  double value;
-  Flags flags;
-
-  BENCHMARK_ALWAYS_INLINE
-  Counter(double v = 0., Flags f = kDefaults) : value(v), flags(f) {}
-
-  BENCHMARK_ALWAYS_INLINE operator double const& () const { return value; }
-  BENCHMARK_ALWAYS_INLINE operator double& () { return value; }
-
-};
-
-// This is the container for the user-defined counters.
-typedef std::map<std::string, Counter> UserCounters;
-
-
-// TimeUnit is passed to a benchmark in order to specify the order of magnitude
-// for the measured time.
-enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond };
-
-// BigO is passed to a benchmark in order to specify the asymptotic
-// computational complexity for the benchmark. In case oAuto is selected,
-// complexity will be calculated automatically to the best fit.
-enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda };
-
-// BigOFunc is passed to a benchmark in order to specify the asymptotic
-// computational complexity for the benchmark.
-typedef double(BigOFunc)(int64_t);
-
-// StatisticsFunc is passed to a benchmark in order to compute some descriptive
-// statistics over all the measurements of some type
-typedef double(StatisticsFunc)(const std::vector<double>&);
-
-struct Statistics {
-  std::string name_;
-  StatisticsFunc* compute_;
-
-  Statistics(std::string name, StatisticsFunc* compute)
-      : name_(name), compute_(compute) {}
-};
-
-namespace internal {
-class ThreadTimer;
-class ThreadManager;
-
-enum ReportMode
-#if defined(BENCHMARK_HAS_CXX11)
-    : unsigned
-#else
-#endif
-    {
-  RM_Unspecified,  // The mode has not been manually specified
-  RM_Default,      // The mode is user-specified as default.
-  RM_ReportAggregatesOnly
-};
-} // namespace internal
-
-// State is passed to a running Benchmark and contains state for the
-// benchmark to use.
-class State {
- public:
-  struct StateIterator;
-  friend struct StateIterator;
-
-  // Returns iterators used to run each iteration of a benchmark using a
-  // C++11 range-based for loop. These functions should not be called directly.
-  //
-  // REQUIRES: The benchmark has not started running yet. Neither begin nor end
-  // have been called previously.
-  //
-  // NOTE: KeepRunning may not be used after calling either of these functions.
-  BENCHMARK_ALWAYS_INLINE StateIterator begin();
-  BENCHMARK_ALWAYS_INLINE StateIterator end();
-
-  // Returns true if the benchmark should continue through another iteration.
-  // NOTE: A benchmark may not return from the test until KeepRunning() has
-  // returned false.
-  bool KeepRunning();
-
-  // Returns true iff the benchmark should run n more iterations.
-  // REQUIRES: 'n' > 0.
-  // NOTE: A benchmark must not return from the test until KeepRunningBatch()
-  // has returned false.
-  // NOTE: KeepRunningBatch() may overshoot by up to 'n' iterations.
-  //
-  // Intended usage:
-  //   while (state.KeepRunningBatch(1000)) {
-  //     // process 1000 elements
-  //   }
-  bool KeepRunningBatch(size_t n);
-
-  // REQUIRES: timer is running and 'SkipWithError(...)' has not been called
-  //           by the current thread.
-  // Stop the benchmark timer. If not called, the timer will be
-  // automatically stopped after the last iteration of the benchmark loop.
-  //
-  // For threaded benchmarks the PauseTiming() function only pauses the timing
-  // for the current thread.
-  //
-  // NOTE: The "real time" measurement is per-thread. If different threads
-  // report different measurements the largest one is reported.
-  //
-  // NOTE: PauseTiming()/ResumeTiming() are relatively
-  // heavyweight, and so their use should generally be avoided
-  // within each benchmark iteration, if possible.
-  void PauseTiming();
-
-  // REQUIRES: timer is not running and 'SkipWithError(...)' has not been called
-  // by the current thread.
-  // Start the benchmark timer. The timer is NOT running on entrance to the
-  // benchmark function. It begins running after control flow enters the
-  // benchmark loop.
-  //
-  // NOTE: PauseTiming()/ResumeTiming() are relatively
-  // heavyweight, and so their use should generally be avoided
-  // within each benchmark iteration, if possible.
-  void ResumeTiming();
-
-  // REQUIRES: 'SkipWithError(...)' has not been called previously by the
-  // current thread.
-  // Report the benchmark as resulting in an error with the specified 'msg'.
-  // After this call the user may explicitly 'return' from the benchmark.
-  //
-  // If the ranged-for style of benchmark loop is used, the user must explicitly
-  // break from the loop, otherwise all future iterations will be run.
-  // If the 'KeepRunning()' loop is used the current thread will automatically
-  // exit the loop at the end of the current iteration.
-  //
-  // For threaded benchmarks only the current thread stops executing and future
-  // calls to `KeepRunning()` will block until all threads have completed
-  // the `KeepRunning()` loop. If multiple threads report an error only the
-  // first error message is used.
-  //
-  // NOTE: Calling 'SkipWithError(...)' does not cause the benchmark to exit
-  // the current scope immediately. If the function is called from within
-  // the 'KeepRunning()' loop the current iteration will finish. It is the
-  // user's responsibility to exit the scope as needed.
-  void SkipWithError(const char* msg);
-
-  // REQUIRES: called exactly once per iteration of the benchmarking loop.
-  // Set the manually measured time for this benchmark iteration, which
-  // is used instead of automatically measured time if UseManualTime() was
-  // specified.
-  //
-  // For threaded benchmarks the final value will be set to the largest
-  // reported value.
-  void SetIterationTime(double seconds);
-
-  // Set the number of bytes processed by the current benchmark
-  // execution. This routine is typically called once at the end of a
-  // throughput oriented benchmark. If this routine is called with a
-  // value > 0, the report is printed in MB/sec instead of nanoseconds
-  // per iteration.
-  //
-  // REQUIRES: a benchmark has exited its benchmarking loop.
-  BENCHMARK_ALWAYS_INLINE
-  void SetBytesProcessed(int64_t bytes) { bytes_processed_ = bytes; }
-
-  BENCHMARK_ALWAYS_INLINE
-  int64_t bytes_processed() const { return bytes_processed_; }
-
-  // If this routine is called with complexity_n > 0 and a complexity report is
-  // requested for the family benchmark, then the current benchmark will be
-  // part of the computation and complexity_n will represent the length of N.
-  BENCHMARK_ALWAYS_INLINE
-  void SetComplexityN(int64_t complexity_n) { complexity_n_ = complexity_n; }
-
-  BENCHMARK_ALWAYS_INLINE
-  int64_t complexity_length_n() { return complexity_n_; }
-
-  // If this routine is called with items > 0, then an items/s
-  // label is printed on the benchmark report line for the currently
-  // executing benchmark. It is typically called at the end of a processing
-  // benchmark where a processing items/second output is desired.
-  //
-  // REQUIRES: a benchmark has exited its benchmarking loop.
-  BENCHMARK_ALWAYS_INLINE
-  void SetItemsProcessed(int64_t items) { items_processed_ = items; }
-
-  BENCHMARK_ALWAYS_INLINE
-  int64_t items_processed() const { return items_processed_; }
-
-  // If this routine is called, the specified label is printed at the
-  // end of the benchmark report line for the currently executing
-  // benchmark. Example:
-  //  static void BM_Compress(benchmark::State& state) {
-  //    ...
-  //    double compression = input_size / output_size;
-  //    state.SetLabel(StrFormat("compress:%.1f%%", 100.0*compression));
-  //  }
-  // Produces output that looks like:
-  //  BM_Compress   50   50   14115038  compress:27.3%
-  //
-  // REQUIRES: a benchmark has exited its benchmarking loop.
-  void SetLabel(const char* label);
-
-  void BENCHMARK_ALWAYS_INLINE SetLabel(const std::string& str) {
-    this->SetLabel(str.c_str());
-  }
-
-  // Range arguments for this run. CHECKs if the argument has been set.
-  BENCHMARK_ALWAYS_INLINE
-  int64_t range(std::size_t pos = 0) const {
-    assert(range_.size() > pos);
-    return range_[pos];
-  }
-
-  BENCHMARK_DEPRECATED_MSG("use 'range(0)' instead")
-  int64_t range_x() const { return range(0); }
-
-  BENCHMARK_DEPRECATED_MSG("use 'range(1)' instead")
-  int64_t range_y() const { return range(1); }
-
-  BENCHMARK_ALWAYS_INLINE
-  size_t iterations() const {
-    if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
-      return 0;
-    }
-    return max_iterations - total_iterations_ + batch_leftover_;
-  }
-
-private:  // items we expect on the first cache line (ie 64 bytes of the struct)
-
-  // When total_iterations_ is 0, KeepRunning() and friends will return false.
-  // May be larger than max_iterations.
-  size_t total_iterations_;
-
-  // When using KeepRunningBatch(), batch_leftover_ holds the number of
-  // iterations beyond max_iters that were run. Used to track
-  // completed_iterations_ accurately.
-  size_t batch_leftover_;
-
-public:
-  const size_t max_iterations;
-
-private:
-  bool started_;
-  bool finished_;
-  bool error_occurred_;
-
-private:  // items we don't need on the first cache line
-  std::vector<int64_t> range_;
-
-  int64_t bytes_processed_;
-  int64_t items_processed_;
-
-  int64_t complexity_n_;
-
- public:
-  // Container for user-defined counters.
-  UserCounters counters;
-  // Index of the executing thread. Values from [0, threads).
-  const int thread_index;
-  // Number of threads concurrently executing the benchmark.
-  const int threads;
-
-
-  // TODO(EricWF) make me private
-  State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
-        int n_threads, internal::ThreadTimer* timer,
-        internal::ThreadManager* manager);
-
- private:
-  void StartKeepRunning();
-  // Implementation of KeepRunning() and KeepRunningBatch().
-  // is_batch must be true unless n is 1.
-  bool KeepRunningInternal(size_t n, bool is_batch);
-  void FinishKeepRunning();
-  internal::ThreadTimer* timer_;
-  internal::ThreadManager* manager_;
-  BENCHMARK_DISALLOW_COPY_AND_ASSIGN(State);
-};
-
-inline BENCHMARK_ALWAYS_INLINE
-bool State::KeepRunning() {
-  return KeepRunningInternal(1, /*is_batch=*/ false);
-}
-
-inline BENCHMARK_ALWAYS_INLINE
-bool State::KeepRunningBatch(size_t n) {
-  return KeepRunningInternal(n, /*is_batch=*/ true);
-}
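A sketch of the batch loop these two entry points support; KeepRunningBatch() may overshoot by up to the batch size, which the batch_leftover_ bookkeeping later corrects. The workload here is hypothetical:

#include <benchmark/benchmark.h>

static void BM_ProcessInChunks(benchmark::State& state) {
  while (state.KeepRunningBatch(1000)) {
    // process 1000 elements per pass; state.iterations() still reports the
    // exact number of iterations consumed so far
  }
}
BENCHMARK(BM_ProcessInChunks);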
-
-inline BENCHMARK_ALWAYS_INLINE
-bool State::KeepRunningInternal(size_t n, bool is_batch) {
-  // total_iterations_ is set to 0 by the constructor, and always set to a
-  // nonzero value by StartKeepRunning().
-  assert(n > 0);
-  // n must be 1 unless is_batch is true.
-  assert(is_batch || n == 1);
-  if (BENCHMARK_BUILTIN_EXPECT(total_iterations_ >= n, true)) {
-    total_iterations_ -= n;
-    return true;
-  }
-  if (!started_) {
-    StartKeepRunning();
-    if (!error_occurred_ && total_iterations_ >= n) {
-      total_iterations_ -= n;
-      return true;
-    }
-  }
-  // For non-batch runs, total_iterations_ must be 0 by now.
-  if (is_batch && total_iterations_ != 0) {
-    batch_leftover_ = n - total_iterations_;
-    total_iterations_ = 0;
-    return true;
-  }
-  FinishKeepRunning();
-  return false;
-}
-
-struct State::StateIterator {
-  struct BENCHMARK_UNUSED Value {};
-  typedef std::forward_iterator_tag iterator_category;
-  typedef Value value_type;
-  typedef Value reference;
-  typedef Value pointer;
-  typedef std::ptrdiff_t difference_type;
-
- private:
-  friend class State;
-  BENCHMARK_ALWAYS_INLINE
-  StateIterator() : cached_(0), parent_() {}
-
-  BENCHMARK_ALWAYS_INLINE
-  explicit StateIterator(State* st)
-      : cached_(st->error_occurred_ ? 0 : st->max_iterations), parent_(st) {}
-
- public:
-  BENCHMARK_ALWAYS_INLINE
-  Value operator*() const { return Value(); }
-
-  BENCHMARK_ALWAYS_INLINE
-  StateIterator& operator++() {
-    assert(cached_ > 0);
-    --cached_;
-    return *this;
-  }
-
-  BENCHMARK_ALWAYS_INLINE
-  bool operator!=(StateIterator const&) const {
-    if (BENCHMARK_BUILTIN_EXPECT(cached_ != 0, true)) return true;
-    parent_->FinishKeepRunning();
-    return false;
-  }
-
- private:
-  size_t cached_;
-  State* const parent_;
-};
-
-inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::begin() {
-  return StateIterator(this);
-}
-inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::end() {
-  StartKeepRunning();
-  return StateIterator();
-}
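For reference, the ranged-for loop form that these iterators exist to support; under C++11 this is the cheaper, preferred loop style (the benchmark itself is a placeholder):

#include <benchmark/benchmark.h>

// The ranged-for below expands to the begin()/end() iterators defined above;
// the body runs max_iterations times and FinishKeepRunning() fires when the
// iterator comparison finally fails.
static void BM_Empty(benchmark::State& state) {
  for (auto _ : state) {
    // code to benchmark
  }
}
BENCHMARK(BM_Empty);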
-
-namespace internal {
-
-typedef void(Function)(State&);
-
-// ------------------------------------------------------
-// Benchmark registration object. The BENCHMARK() macro expands
-// into an internal::Benchmark* object. Various methods can
-// be called on this object to change the properties of the benchmark.
-// Each method returns "this" so that multiple method calls can be
-// chained into one expression.
-class Benchmark {
- public:
-  virtual ~Benchmark();
-
-  // Note: the following methods all return "this" so that multiple
-  // method calls can be chained together in one expression.
-
-  // Run this benchmark once with "x" as the extra argument passed
-  // to the function.
-  // REQUIRES: The function passed to the constructor must accept an arg1.
-  Benchmark* Arg(int64_t x);
-
-  // Run this benchmark with the given time unit for the generated output report
-  Benchmark* Unit(TimeUnit unit);
-
-  // Run this benchmark once for a number of values picked from the
-  // range [start..limit]. (start and limit are always picked.)
-  // REQUIRES: The function passed to the constructor must accept an arg1.
-  Benchmark* Range(int64_t start, int64_t limit);
-
-  // Run this benchmark once for all values in the range [start..limit] with
-  // a specific step.
-  // REQUIRES: The function passed to the constructor must accept an arg1.
-  Benchmark* DenseRange(int64_t start, int64_t limit, int step = 1);
-
-  // Run this benchmark once with "args" as the extra arguments passed
-  // to the function.
-  // REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
-  Benchmark* Args(const std::vector<int64_t>& args);
-
-  // Equivalent to Args({x, y})
-  // NOTE: This is a legacy C++03 interface provided for compatibility only.
-  //   New code should use 'Args'.
-  Benchmark* ArgPair(int64_t x, int64_t y) {
-    std::vector<int64_t> args;
-    args.push_back(x);
-    args.push_back(y);
-    return Args(args);
-  }
-
-  // Run this benchmark once for a number of values picked from the
-  // ranges [start..limit]. (starts and limits are always picked.)
-  // REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
-  Benchmark* Ranges(const std::vector<std::pair<int64_t, int64_t> >& ranges);
-
-  // Equivalent to ArgNames({name})
-  Benchmark* ArgName(const std::string& name);
-
-  // Set the argument names to display in the benchmark name. If not called,
-  // only argument values will be shown.
-  Benchmark* ArgNames(const std::vector<std::string>& names);
-
-  // Equivalent to Ranges({{lo1, hi1}, {lo2, hi2}}).
-  // NOTE: This is a legacy C++03 interface provided for compatibility only.
-  //   New code should use 'Ranges'.
-  Benchmark* RangePair(int64_t lo1, int64_t hi1, int64_t lo2, int64_t hi2) {
-    std::vector<std::pair<int64_t, int64_t> > ranges;
-    ranges.push_back(std::make_pair(lo1, hi1));
-    ranges.push_back(std::make_pair(lo2, hi2));
-    return Ranges(ranges);
-  }
-
-  // Pass this benchmark object to *func, which can customize
-  // the benchmark by calling various methods like Arg, Args,
-  // Threads, etc.
-  Benchmark* Apply(void (*func)(Benchmark* benchmark));
-
-  // Set the range multiplier for non-dense range. If not called, the range
-  // multiplier kRangeMultiplier will be used.
-  Benchmark* RangeMultiplier(int multiplier);
-
-  // Set the minimum amount of time to use when running this benchmark. This
-  // option overrides the `benchmark_min_time` flag.
-  // REQUIRES: `t > 0` and `Iterations` has not been called on this benchmark.
-  Benchmark* MinTime(double t);
-
-  // Specify the number of iterations that should be run by this benchmark.
-  // REQUIRES: 'n > 0' and `MinTime` has not been called on this benchmark.
-  //
-  // NOTE: This function should only be used when *exact* iteration control is
-  //   needed and never to control or limit how long a benchmark runs, where
-  //   `--benchmark_min_time=N` or `MinTime(...)` should be used instead.
-  Benchmark* Iterations(size_t n);
-
-  // Specify the number of times to repeat this benchmark. This option overrides
-  // the `benchmark_repetitions` flag.
-  // REQUIRES: `n > 0`
-  Benchmark* Repetitions(int n);
-
-  // Specify if each repetition of the benchmark should be reported separately
-  // or if only the final statistics should be reported. If the benchmark
-  // is not repeated then the single result is always reported.
-  Benchmark* ReportAggregatesOnly(bool value = true);
-
-  // If a particular benchmark is I/O bound, runs multiple threads internally or
-  // if for some reason CPU timings are not representative, call this method. If
-  // called, the elapsed time will be used to control how many iterations are
-  // run, and in the printing of items/second or MB/seconds values. If not
-  // called, the cpu time used by the benchmark will be used.
-  Benchmark* UseRealTime();
-
-  // If a benchmark must measure time manually (e.g. if GPU execution time is
-  // being measured), call this method. If called, each benchmark iteration
-  // should call SetIterationTime(seconds) to report the measured time, which
-  // will be used to control how many iterations are run, and in the printing
-  // of items/second or MB/second values.
-  Benchmark* UseManualTime();
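To make the "returns this" chaining concrete, a hypothetical registration that combines several of the setters above (the benchmark body and argument meanings are invented for illustration):

#include <benchmark/benchmark.h>

static void BM_DenseLookup(benchmark::State& state) {
  // state.range(0): table size, state.range(1): probe count (hypothetical)
  for (auto _ : state) {
  }
}
// Every setter returns "this", so a registration reads as one chain.
BENCHMARK(BM_DenseLookup)
    ->Args({1 << 10, 64})
    ->Ranges({{1 << 10, 8 << 10}, {64, 512}})
    ->ArgNames({"size", "probes"})
    ->Unit(benchmark::kMicrosecond);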
-
-  // Set the asymptotic computational complexity for the benchmark. If called
-  // the asymptotic computational complexity will be shown on the output.
-  Benchmark* Complexity(BigO complexity = benchmark::oAuto);
-
-  // Set the asymptotic computational complexity for the benchmark. If called
-  // the asymptotic computational complexity will be shown on the output.
-  Benchmark* Complexity(BigOFunc* complexity);
-
-  // Add this statistic to be computed over all the values of the benchmark run
-  Benchmark* ComputeStatistics(std::string name, StatisticsFunc* statistics);
-
-  // Support for running multiple copies of the same benchmark concurrently
-  // in multiple threads. This may be useful when measuring the scaling
-  // of some piece of code.
-
-  // Run one instance of this benchmark concurrently in t threads.
-  Benchmark* Threads(int t);
-
-  // Pick a set of values T from [min_threads,max_threads].
-  // min_threads and max_threads are always included in T. Run this
-  // benchmark once for each value in T. The benchmark run for a
-  // particular value t consists of t threads running the benchmark
-  // function concurrently. For example, consider:
-  //    BENCHMARK(Foo)->ThreadRange(1,16);
-  // This will run the following benchmarks:
-  //    Foo in 1 thread
-  //    Foo in 2 threads
-  //    Foo in 4 threads
-  //    Foo in 8 threads
-  //    Foo in 16 threads
-  Benchmark* ThreadRange(int min_threads, int max_threads);
-
-  // For each value n in the range, run this benchmark once using n threads.
-  // min_threads and max_threads are always included in the range.
-  // stride specifies the increment. E.g. DenseThreadRange(1, 8, 3) starts
-  // a benchmark with 1, 4, 7 and 8 threads.
-  Benchmark* DenseThreadRange(int min_threads, int max_threads, int stride = 1);
-
-  // Equivalent to ThreadRange(NumCPUs(), NumCPUs())
-  Benchmark* ThreadPerCpu();
-
-  virtual void Run(State& state) = 0;
-
-  // Used inside the benchmark implementation
-  struct Instance;
-
- protected:
-  explicit Benchmark(const char* name);
-  Benchmark(Benchmark const&);
-  void SetName(const char* name);
-
-  int ArgsCnt() const;
-
- private:
-  friend class BenchmarkFamilies;
-
-  std::string name_;
-  ReportMode report_mode_;
-  std::vector<std::string> arg_names_;  // Args for all benchmark runs
-  std::vector<std::vector<int64_t> > args_;  // Args for all benchmark runs
-  TimeUnit time_unit_;
-  int range_multiplier_;
-  double min_time_;
-  size_t iterations_;
-  int repetitions_;
-  bool use_real_time_;
-  bool use_manual_time_;
-  BigO complexity_;
-  BigOFunc* complexity_lambda_;
-  std::vector<Statistics> statistics_;
-  std::vector<int> thread_counts_;
-
-  Benchmark& operator=(Benchmark const&);
-};
-
-}  // namespace internal
-
-// Create and register a benchmark with the specified 'name' that invokes
-// the specified functor 'fn'.
-//
-// RETURNS: A pointer to the registered benchmark.
-internal::Benchmark* RegisterBenchmark(const char* name,
-                                       internal::Function* fn);
-
-#if defined(BENCHMARK_HAS_CXX11)
-template <class Lambda>
-internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn);
-#endif
-
-// Remove all registered benchmarks. All pointers to previously registered
-// benchmarks are invalidated.
-void ClearRegisteredBenchmarks();
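A sketch tying SetComplexityN() to the Complexity() setter declared above; the workload is a stand-in, and oAuto asks the library to pick the best-fitting curve:

#include <benchmark/benchmark.h>

#include <string>

static void BM_StringCompare(benchmark::State& state) {
  std::string s1(static_cast<size_t>(state.range(0)), '-');
  std::string s2(static_cast<size_t>(state.range(0)), '-');
  for (auto _ : state) {
    benchmark::DoNotOptimize(s1.compare(s2));
  }
  state.SetComplexityN(state.range(0));
}
// oAuto fits the best curve; a BigOFunc such as
// [](int64_t n) -> double { return static_cast<double>(n); } could be
// passed instead for an explicit fit.
BENCHMARK(BM_StringCompare)
    ->RangeMultiplier(2)->Range(1 << 10, 1 << 18)->Complexity(benchmark::oAuto);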
-
-namespace internal {
-// The class used to hold all Benchmarks created from static functions
-// (ie those created using the BENCHMARK(...) macros).
-class FunctionBenchmark : public Benchmark {
- public:
-  FunctionBenchmark(const char* name, Function* func)
-      : Benchmark(name), func_(func) {}
-
-  virtual void Run(State& st);
-
- private:
-  Function* func_;
-};
-
-#ifdef BENCHMARK_HAS_CXX11
-template <class Lambda>
-class LambdaBenchmark : public Benchmark {
- public:
-  virtual void Run(State& st) { lambda_(st); }
-
- private:
-  template <class OLambda>
-  LambdaBenchmark(const char* name, OLambda&& lam)
-      : Benchmark(name), lambda_(std::forward<OLambda>(lam)) {}
-
-  LambdaBenchmark(LambdaBenchmark const&) = delete;
-
- private:
-  template <class Lam>
-  friend Benchmark* ::benchmark::RegisterBenchmark(const char*, Lam&&);
-
-  Lambda lambda_;
-};
-#endif
-
-}  // namespace internal
-
-inline internal::Benchmark* RegisterBenchmark(const char* name,
-                                              internal::Function* fn) {
-  return internal::RegisterBenchmarkInternal(
-      ::new internal::FunctionBenchmark(name, fn));
-}
-
-#ifdef BENCHMARK_HAS_CXX11
-template <class Lambda>
-internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn) {
-  using BenchType =
-      internal::LambdaBenchmark<typename std::decay<Lambda>::type>;
-  return internal::RegisterBenchmarkInternal(
-      ::new BenchType(name, std::forward<Lambda>(fn)));
-}
-#endif
-
-#if defined(BENCHMARK_HAS_CXX11) && \
-    (!defined(BENCHMARK_GCC_VERSION) || BENCHMARK_GCC_VERSION >= 409)
-template <class Lambda, class... Args>
-internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn,
-                                       Args&&... args) {
-  return benchmark::RegisterBenchmark(
-      name, [=](benchmark::State& st) { fn(st, args...); });
-}
-#else
-#define BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
-#endif
-
-// The base class for all fixture tests.
-class Fixture : public internal::Benchmark {
- public:
-  Fixture() : internal::Benchmark("") {}
-
-  virtual void Run(State& st) {
-    this->SetUp(st);
-    this->BenchmarkCase(st);
-    this->TearDown(st);
-  }
-
-  // These will be deprecated ...
-  virtual void SetUp(const State&) {}
-  virtual void TearDown(const State&) {}
-  // ... In favor of these.
-  virtual void SetUp(State& st) { SetUp(const_cast<const State&>(st)); }
-  virtual void TearDown(State& st) { TearDown(const_cast<const State&>(st)); }
-
- protected:
-  virtual void BenchmarkCase(State&) = 0;
-};
-
-}  // namespace benchmark
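A hypothetical use of the lambda-based RegisterBenchmark overload above: runtime registration lets benchmark names be built dynamically, which the BENCHMARK() macro cannot do (names and sizes here are illustrative):

#include <benchmark/benchmark.h>

#include <string>

int main(int argc, char** argv) {
  // Register one benchmark per size, with a name computed at runtime.
  const int sizes[] = {256, 1024, 4096};
  for (int size : sizes) {
    benchmark::RegisterBenchmark(
        ("BM_Dynamic/" + std::to_string(size)).c_str(),
        [size](benchmark::State& st) {
          for (auto _ : st) {
            benchmark::DoNotOptimize(size);
          }
        });
  }
  benchmark::Initialize(&argc, argv);
  benchmark::RunSpecifiedBenchmarks();
}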
-
-// ------------------------------------------------------
-// Macro to register benchmarks
-
-// Check that __COUNTER__ is defined and that __COUNTER__ increases by 1
-// every time it is expanded. X + 1 == X + 0 is used in case X is defined to be
-// empty. If X is empty the expression becomes (+1 == +0).
-#if defined(__COUNTER__) && (__COUNTER__ + 1 == __COUNTER__ + 0)
-#define BENCHMARK_PRIVATE_UNIQUE_ID __COUNTER__
-#else
-#define BENCHMARK_PRIVATE_UNIQUE_ID __LINE__
-#endif
-
-// Helpers for generating unique variable names
-#define BENCHMARK_PRIVATE_NAME(n) \
-  BENCHMARK_PRIVATE_CONCAT(_benchmark_, BENCHMARK_PRIVATE_UNIQUE_ID, n)
-#define BENCHMARK_PRIVATE_CONCAT(a, b, c) BENCHMARK_PRIVATE_CONCAT2(a, b, c)
-#define BENCHMARK_PRIVATE_CONCAT2(a, b, c) a##b##c
-
-#define BENCHMARK_PRIVATE_DECLARE(n)                                 \
-  static ::benchmark::internal::Benchmark* BENCHMARK_PRIVATE_NAME(n) \
-      BENCHMARK_UNUSED
-
-#define BENCHMARK(n)                                     \
-  BENCHMARK_PRIVATE_DECLARE(n) =                         \
-      (::benchmark::internal::RegisterBenchmarkInternal( \
-          new ::benchmark::internal::FunctionBenchmark(#n, n)))
-
-// Old-style macros
-#define BENCHMARK_WITH_ARG(n, a) BENCHMARK(n)->Arg((a))
-#define BENCHMARK_WITH_ARG2(n, a1, a2) BENCHMARK(n)->Args({(a1), (a2)})
-#define BENCHMARK_WITH_UNIT(n, t) BENCHMARK(n)->Unit((t))
-#define BENCHMARK_RANGE(n, lo, hi) BENCHMARK(n)->Range((lo), (hi))
-#define BENCHMARK_RANGE2(n, l1, h1, l2, h2) \
-  BENCHMARK(n)->RangePair({{(l1), (h1)}, {(l2), (h2)}})
-
-#ifdef BENCHMARK_HAS_CXX11
-
-// Register a benchmark which invokes the function specified by `func`
-// with the additional arguments specified by `...`.
-//
-// For example:
-//
-//  template <class ...ExtraArgs>
-//  void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
-//    [...]
-//  }
-//  /* Registers a benchmark named "BM_takes_args/int_string_test" */
-//  BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
-#define BENCHMARK_CAPTURE(func, test_case_name, ...)     \
-  BENCHMARK_PRIVATE_DECLARE(func) =                      \
-      (::benchmark::internal::RegisterBenchmarkInternal( \
-          new ::benchmark::internal::FunctionBenchmark(  \
-              #func "/" #test_case_name,                 \
-              [](::benchmark::State& st) { func(st, __VA_ARGS__); })))
-
-#endif  // BENCHMARK_HAS_CXX11
-
-// This will register a benchmark for a templatized function. For example:
-//
-//  template <int arg>
-//  void BM_Foo(int iters);
-//
-//  BENCHMARK_TEMPLATE(BM_Foo, 1);
-//
-// will register BM_Foo<1> as a benchmark.
-#define BENCHMARK_TEMPLATE1(n, a)                        \
-  BENCHMARK_PRIVATE_DECLARE(n) =                         \
-      (::benchmark::internal::RegisterBenchmarkInternal( \
-          new ::benchmark::internal::FunctionBenchmark(#n "<" #a ">", n)))
-
-#define BENCHMARK_TEMPLATE2(n, a, b)                     \
-  BENCHMARK_PRIVATE_DECLARE(n) =                         \
-      (::benchmark::internal::RegisterBenchmarkInternal( \
-          new ::benchmark::internal::FunctionBenchmark(#n "<" #a "," #b ">", \
-                                                       n)))
-
-#ifdef BENCHMARK_HAS_CXX11
-#define BENCHMARK_TEMPLATE(n, ...)
\ - BENCHMARK_PRIVATE_DECLARE(n) = \ - (::benchmark::internal::RegisterBenchmarkInternal( \ - new ::benchmark::internal::FunctionBenchmark( \ - #n "<" #__VA_ARGS__ ">", n<__VA_ARGS__>))) -#else -#define BENCHMARK_TEMPLATE(n, a) BENCHMARK_TEMPLATE1(n, a) -#endif - -#define BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ - class BaseClass##_##Method##_Benchmark : public BaseClass { \ - public: \ - BaseClass##_##Method##_Benchmark() : BaseClass() { \ - this->SetName(#BaseClass "/" #Method); \ - } \ - \ - protected: \ - virtual void BenchmarkCase(::benchmark::State&); \ - }; - -#define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ - class BaseClass##_##Method##_Benchmark : public BaseClass { \ - public: \ - BaseClass##_##Method##_Benchmark() : BaseClass() { \ - this->SetName(#BaseClass"<" #a ">/" #Method); \ - } \ - \ - protected: \ - virtual void BenchmarkCase(::benchmark::State&); \ - }; - -#define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ - class BaseClass##_##Method##_Benchmark : public BaseClass { \ - public: \ - BaseClass##_##Method##_Benchmark() : BaseClass() { \ - this->SetName(#BaseClass"<" #a "," #b ">/" #Method); \ - } \ - \ - protected: \ - virtual void BenchmarkCase(::benchmark::State&); \ - }; - -#ifdef BENCHMARK_HAS_CXX11 -#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, ...) \ - class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__> { \ - public: \ - BaseClass##_##Method##_Benchmark() : BaseClass<__VA_ARGS__>() { \ - this->SetName(#BaseClass"<" #__VA_ARGS__ ">/" #Method); \ - } \ - \ - protected: \ - virtual void BenchmarkCase(::benchmark::State&); \ - }; -#else -#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a) -#endif - -#define BENCHMARK_DEFINE_F(BaseClass, Method) \ - BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase - -#define BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) \ - BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase - -#define BENCHMARK_TEMPLATE2_DEFINE_F(BaseClass, Method, a, b) \ - BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase - -#ifdef BENCHMARK_HAS_CXX11 -#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, ...) \ - BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase -#else -#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) -#endif - -#define BENCHMARK_REGISTER_F(BaseClass, Method) \ - BENCHMARK_PRIVATE_REGISTER_F(BaseClass##_##Method##_Benchmark) - -#define BENCHMARK_PRIVATE_REGISTER_F(TestName) \ - BENCHMARK_PRIVATE_DECLARE(TestName) = \ - (::benchmark::internal::RegisterBenchmarkInternal(new TestName())) - -// This macro will define and register a benchmark within a fixture class. 
-#define BENCHMARK_F(BaseClass, Method)           \
-  BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
-  BENCHMARK_REGISTER_F(BaseClass, Method);       \
-  void BaseClass##_##Method##_Benchmark::BenchmarkCase
-
-#define BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)           \
-  BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
-  BENCHMARK_REGISTER_F(BaseClass, Method);                    \
-  void BaseClass##_##Method##_Benchmark::BenchmarkCase
-
-#define BENCHMARK_TEMPLATE2_F(BaseClass, Method, a, b)           \
-  BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
-  BENCHMARK_REGISTER_F(BaseClass, Method);                       \
-  void BaseClass##_##Method##_Benchmark::BenchmarkCase
-
-#ifdef BENCHMARK_HAS_CXX11
-#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...)                    \
-  BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__)  \
-  BENCHMARK_REGISTER_F(BaseClass, Method);                              \
-  void BaseClass##_##Method##_Benchmark::BenchmarkCase
-#else
-#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)
-#endif
-
-// Helper macro to create a main routine in a test that runs the benchmarks
-#define BENCHMARK_MAIN()                                                 \
-  int main(int argc, char** argv) {                                      \
-    ::benchmark::Initialize(&argc, argv);                                \
-    if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;  \
-    ::benchmark::RunSpecifiedBenchmarks();                               \
-  }                                                                      \
-  int main(int, char**)
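Putting the fixture macros and BENCHMARK_MAIN() together, a hypothetical fixture benchmark (the fixture name and case are invented for illustration):

#include <benchmark/benchmark.h>

// SetUp/TearDown bracket every run of the fixture's cases; BENCHMARK_F
// defines the case body and registers it in one go.
class QueueFixture : public benchmark::Fixture {
 public:
  void SetUp(benchmark::State&) override { /* acquire resources */ }
  void TearDown(benchmark::State&) override { /* release resources */ }
};

BENCHMARK_F(QueueFixture, Push)(benchmark::State& st) {
  for (auto _ : st) {
    // exercise the fixture's resources
  }
}

BENCHMARK_MAIN();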
-
-
-// ------------------------------------------------------
-// Benchmark Reporters
-
-namespace benchmark {
-
-struct CPUInfo {
-  struct CacheInfo {
-    std::string type;
-    int level;
-    int size;
-    int num_sharing;
-  };
-
-  int num_cpus;
-  double cycles_per_second;
-  std::vector<CacheInfo> caches;
-  bool scaling_enabled;
-
-  static const CPUInfo& Get();
-
- private:
-  CPUInfo();
-  BENCHMARK_DISALLOW_COPY_AND_ASSIGN(CPUInfo);
-};
-
-// Interface for custom benchmark result printers.
-// By default, benchmark reports are printed to stdout. However an application
-// can control the destination of the reports by calling
-// RunSpecifiedBenchmarks and passing it a custom reporter object.
-// The reporter object must implement the following interface.
-class BenchmarkReporter {
- public:
-  struct Context {
-    CPUInfo const& cpu_info;
-    // The number of chars in the longest benchmark name.
-    size_t name_field_width;
-    static const char *executable_name;
-    Context();
-  };
-
-  struct Run {
-    Run()
-        : error_occurred(false),
-          iterations(1),
-          time_unit(kNanosecond),
-          real_accumulated_time(0),
-          cpu_accumulated_time(0),
-          bytes_per_second(0),
-          items_per_second(0),
-          max_heapbytes_used(0),
-          complexity(oNone),
-          complexity_lambda(),
-          complexity_n(0),
-          report_big_o(false),
-          report_rms(false),
-          counters() {}
-
-    std::string benchmark_name;
-    std::string report_label;  // Empty if not set by benchmark.
-    bool error_occurred;
-    std::string error_message;
-
-    int64_t iterations;
-    TimeUnit time_unit;
-    double real_accumulated_time;
-    double cpu_accumulated_time;
-
-    // Return a value representing the real time per iteration in the unit
-    // specified by 'time_unit'.
-    // NOTE: If 'iterations' is zero the returned value represents the
-    // accumulated time.
-    double GetAdjustedRealTime() const;
-
-    // Return a value representing the cpu time per iteration in the unit
-    // specified by 'time_unit'.
-    // NOTE: If 'iterations' is zero the returned value represents the
-    // accumulated time.
-    double GetAdjustedCPUTime() const;
-
-    // Zero if not set by benchmark.
-    double bytes_per_second;
-    double items_per_second;
-
-    // This is set to 0.0 if memory tracing is not enabled.
-    double max_heapbytes_used;
-
-    // Keep track of arguments to compute asymptotic complexity
-    BigO complexity;
-    BigOFunc* complexity_lambda;
-    int64_t complexity_n;
-
-    // what statistics to compute from the measurements
-    const std::vector<Statistics>* statistics;
-
-    // Inform print function whether the current run is a complexity report
-    bool report_big_o;
-    bool report_rms;
-
-    UserCounters counters;
-  };
-
-  // Construct a BenchmarkReporter with the output stream set to 'std::cout'
-  // and the error stream set to 'std::cerr'
-  BenchmarkReporter();
-
-  // Called once for every suite of benchmarks run.
-  // The parameter "context" contains information that the
-  // reporter may wish to use when generating its report, for example the
-  // platform under which the benchmarks are running. The benchmark run is
-  // never started if this function returns false, allowing the reporter
-  // to skip runs based on the context information.
-  virtual bool ReportContext(const Context& context) = 0;
-
-  // Called once for each group of benchmark runs, gives information about
-  // cpu-time and heap memory usage during the benchmark run. If the group
-  // of runs contained more than two entries then 'report' contains additional
-  // elements representing the mean and standard deviation of those runs.
-  // Additionally if this group of runs was the last in a family of benchmarks
-  // 'reports' contains additional entries representing the asymptotic
-  // complexity and RMS of that benchmark family.
-  virtual void ReportRuns(const std::vector<Run>& report) = 0;
-
-  // Called once and only once after every group of benchmarks is run and
-  // reported.
-  virtual void Finalize() {}
-
-  // REQUIRES: The object referenced by 'out' is valid for the lifetime
-  // of the reporter.
-  void SetOutputStream(std::ostream* out) {
-    assert(out);
-    output_stream_ = out;
-  }
-
-  // REQUIRES: The object referenced by 'err' is valid for the lifetime
-  // of the reporter.
-  void SetErrorStream(std::ostream* err) {
-    assert(err);
-    error_stream_ = err;
-  }
-
-  std::ostream& GetOutputStream() const { return *output_stream_; }
-
-  std::ostream& GetErrorStream() const { return *error_stream_; }
-
-  virtual ~BenchmarkReporter();
-
-  // Write a human readable string to 'out' representing the specified
-  // 'context'.
-  // REQUIRES: 'out' is non-null.
-  static void PrintBasicContext(std::ostream* out, Context const& context);
-
- private:
-  std::ostream* output_stream_;
-  std::ostream* error_stream_;
-};
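A hypothetical minimal implementation of this interface, to show the shape a custom reporter takes (it prints one name per run and reuses PrintBasicContext() for the header; it would be passed to RunSpecifiedBenchmarks()):

#include <benchmark/benchmark.h>

class NamesOnlyReporter : public benchmark::BenchmarkReporter {
 public:
  bool ReportContext(const Context& context) override {
    PrintBasicContext(&GetErrorStream(), context);
    return true;  // returning false would skip the benchmark runs entirely
  }
  void ReportRuns(const std::vector<Run>& runs) override {
    for (const Run& run : runs)
      GetOutputStream() << run.benchmark_name << "\n";
  }
};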
-
-// Simple reporter that outputs benchmark data to the console. This is the
-// default reporter used by RunSpecifiedBenchmarks().
-class ConsoleReporter : public BenchmarkReporter {
-public:
-  enum OutputOptions {
-    OO_None = 0,
-    OO_Color = 1,
-    OO_Tabular = 2,
-    OO_ColorTabular = OO_Color|OO_Tabular,
-    OO_Defaults = OO_ColorTabular
-  };
-  explicit ConsoleReporter(OutputOptions opts_ = OO_Defaults)
-      : output_options_(opts_), name_field_width_(0),
-        prev_counters_(), printed_header_(false) {}
-
-  virtual bool ReportContext(const Context& context);
-  virtual void ReportRuns(const std::vector<Run>& reports);
-
- protected:
-  virtual void PrintRunData(const Run& report);
-  virtual void PrintHeader(const Run& report);
-
-  OutputOptions output_options_;
-  size_t name_field_width_;
-  UserCounters prev_counters_;
-  bool printed_header_;
-};
-
-class JSONReporter : public BenchmarkReporter {
- public:
-  JSONReporter() : first_report_(true) {}
-  virtual bool ReportContext(const Context& context);
-  virtual void ReportRuns(const std::vector<Run>& reports);
-  virtual void Finalize();
-
- private:
-  void PrintRunData(const Run& report);
-
-  bool first_report_;
-};
-
-class CSVReporter : public BenchmarkReporter {
- public:
-  CSVReporter() : printed_header_(false) {}
-  virtual bool ReportContext(const Context& context);
-  virtual void ReportRuns(const std::vector<Run>& reports);
-
- private:
-  void PrintRunData(const Run& report);
-
-  bool printed_header_;
-  std::set< std::string > user_counter_names_;
-};
-
-inline const char* GetTimeUnitString(TimeUnit unit) {
-  switch (unit) {
-    case kMillisecond:
-      return "ms";
-    case kMicrosecond:
-      return "us";
-    case kNanosecond:
-    default:
-      return "ns";
-  }
-}
-
-inline double GetTimeUnitMultiplier(TimeUnit unit) {
-  switch (unit) {
-    case kMillisecond:
-      return 1e3;
-    case kMicrosecond:
-      return 1e6;
-    case kNanosecond:
-    default:
-      return 1e9;
-  }
-}
-
-}  // namespace benchmark
-
-#endif  // BENCHMARK_BENCHMARK_H_
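For completeness, a sketch of wiring the built-in reporters declared above into a custom main(), per the two- and three-argument RunSpecifiedBenchmarks() overloads:

#include <benchmark/benchmark.h>

int main(int argc, char** argv) {
  benchmark::Initialize(&argc, argv);
  benchmark::ConsoleReporter console;
  benchmark::JSONReporter json;
  // The second reporter writes to the file named by --benchmark_out and is
  // ignored when that flag is absent.
  benchmark::RunSpecifiedBenchmarks(&console, &json);
}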
diff --git a/benchmarks/thirdparty/benchmark/mingw.py b/benchmarks/thirdparty/benchmark/mingw.py
deleted file mode 100755
index 706ad559db..0000000000
--- a/benchmarks/thirdparty/benchmark/mingw.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/usr/bin/env python
-# encoding: utf-8
-
-import argparse
-import errno
-import logging
-import os
-import platform
-import re
-import sys
-import subprocess
-import tempfile
-
-try:
-    import winreg
-except ImportError:
-    import _winreg as winreg
-try:
-    import urllib.request as request
-except ImportError:
-    import urllib as request
-try:
-    import urllib.parse as parse
-except ImportError:
-    import urlparse as parse
-
-class EmptyLogger(object):
-    '''
-    Provides an implementation that performs no logging
-    '''
-    def debug(self, *k, **kw):
-        pass
-    def info(self, *k, **kw):
-        pass
-    def warn(self, *k, **kw):
-        pass
-    def error(self, *k, **kw):
-        pass
-    def critical(self, *k, **kw):
-        pass
-    def setLevel(self, *k, **kw):
-        pass
-
-urls = (
-    'http://downloads.sourceforge.net/project/mingw-w64/Toolchains%20'
-        'targetting%20Win32/Personal%20Builds/mingw-builds/installer/'
-        'repository.txt',
-    'http://downloads.sourceforge.net/project/mingwbuilds/host-windows/'
-        'repository.txt'
-)
-'''
-A list of mingw-build repositories
-'''
-
-def repository(urls = urls, log = EmptyLogger()):
-    '''
-    Downloads and parses mingw-build repository files
-    '''
-    log.info('getting mingw-builds repository')
-    versions = {}
-    re_sourceforge = re.compile(r'http://sourceforge.net/projects/([^/]+)/files')
-    re_sub = r'http://downloads.sourceforge.net/project/\1'
-    for url in urls:
-        log.debug(' - requesting: %s', url)
-        socket = request.urlopen(url)
-        repo = socket.read()
-        if not isinstance(repo, str):
-            repo = repo.decode()
-        socket.close()
-        for entry in repo.split('\n')[:-1]:
-            value = entry.split('|')
-            version = tuple([int(n) for n in value[0].strip().split('.')])
-            version = versions.setdefault(version, {})
-            arch = value[1].strip()
-            if arch == 'x32':
-                arch = 'i686'
-            elif arch == 'x64':
-                arch = 'x86_64'
-            arch = version.setdefault(arch, {})
-            threading = arch.setdefault(value[2].strip(), {})
-            exceptions = threading.setdefault(value[3].strip(), {})
-            revision = exceptions.setdefault(int(value[4].strip()[3:]),
-                re_sourceforge.sub(re_sub, value[5].strip()))
-    return versions
-
-def find_in_path(file, path=None):
-    '''
-    Attempts to find an executable in the path
-    '''
-    if platform.system() == 'Windows':
-        file += '.exe'
-    if path is None:
-        path = os.environ.get('PATH', '')
-    if type(path) is type(''):
-        path = path.split(os.pathsep)
-    return list(filter(os.path.exists,
-        map(lambda dir, file=file: os.path.join(dir, file), path)))
-
-def find_7zip(log = EmptyLogger()):
-    '''
-    Attempts to find 7zip for unpacking the mingw-builds archives
-    '''
-    log.info('finding 7zip')
-    path = find_in_path('7z')
-    if not path:
-        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\7-Zip')
-        path, _ = winreg.QueryValueEx(key, 'Path')
-        path = [os.path.join(path, '7z.exe')]
-    log.debug('found \'%s\'', path[0])
-    return path[0]
-
-def unpack(archive, location, log = EmptyLogger()):
-    '''
-    Unpacks a mingw-builds archive
-    '''
-    sevenzip = find_7zip(log)
-    log.info('unpacking %s', os.path.basename(archive))
-    cmd = [sevenzip, 'x', archive, '-o' + location, '-y']
-    log.debug(' - %r', cmd)
-    with open(os.devnull, 'w') as devnull:
-        subprocess.check_call(cmd, stdout = devnull)
-
-def download(url, location, log = EmptyLogger()):
-    '''
-    Downloads and unpacks a mingw-builds archive
-    '''
-    log.info('downloading MinGW')
-    log.debug(' - url: %s', url)
-    log.debug(' - location: %s', location)
-
-    re_content = re.compile(r'attachment;[ \t]*filename=(")?([^"]*)(")?[\r\n]*')
-
-    stream =
request.urlopen(url) - try: - content = stream.getheader('Content-Disposition') or '' - except AttributeError: - content = stream.headers.getheader('Content-Disposition') or '' - matches = re_content.match(content) - if matches: - filename = matches.group(2) - else: - parsed = parse.urlparse(stream.geturl()) - filename = os.path.basename(parsed.path) - - try: - os.makedirs(location) - except OSError as e: - if e.errno == errno.EEXIST and os.path.isdir(location): - pass - else: - raise - - archive = os.path.join(location, filename) - with open(archive, 'wb') as out: - while True: - buf = stream.read(1024) - if not buf: - break - out.write(buf) - unpack(archive, location, log = log) - os.remove(archive) - - possible = os.path.join(location, 'mingw64') - if not os.path.exists(possible): - possible = os.path.join(location, 'mingw32') - if not os.path.exists(possible): - raise ValueError('Failed to find unpacked MinGW: ' + possible) - return possible - -def root(location = None, arch = None, version = None, threading = None, - exceptions = None, revision = None, log = EmptyLogger()): - ''' - Returns the root folder of a specific version of the mingw-builds variant - of gcc. Will download the compiler if needed - ''' - - # Get the repository if we don't have all the information - if not (arch and version and threading and exceptions and revision): - versions = repository(log = log) - - # Determine some defaults - version = version or max(versions.keys()) - if not arch: - arch = platform.machine().lower() - if arch == 'x86': - arch = 'i686' - elif arch == 'amd64': - arch = 'x86_64' - if not threading: - keys = versions[version][arch].keys() - if 'posix' in keys: - threading = 'posix' - elif 'win32' in keys: - threading = 'win32' - else: - threading = keys[0] - if not exceptions: - keys = versions[version][arch][threading].keys() - if 'seh' in keys: - exceptions = 'seh' - elif 'sjlj' in keys: - exceptions = 'sjlj' - else: - exceptions = keys[0] - if revision == None: - revision = max(versions[version][arch][threading][exceptions].keys()) - if not location: - location = os.path.join(tempfile.gettempdir(), 'mingw-builds') - - # Get the download url - url = versions[version][arch][threading][exceptions][revision] - - # Tell the user whatzzup - log.info('finding MinGW %s', '.'.join(str(v) for v in version)) - log.debug(' - arch: %s', arch) - log.debug(' - threading: %s', threading) - log.debug(' - exceptions: %s', exceptions) - log.debug(' - revision: %s', revision) - log.debug(' - url: %s', url) - - # Store each specific revision differently - slug = '{version}-{arch}-{threading}-{exceptions}-rev{revision}' - slug = slug.format( - version = '.'.join(str(v) for v in version), - arch = arch, - threading = threading, - exceptions = exceptions, - revision = revision - ) - if arch == 'x86_64': - root_dir = os.path.join(location, slug, 'mingw64') - elif arch == 'i686': - root_dir = os.path.join(location, slug, 'mingw32') - else: - raise ValueError('Unknown MinGW arch: ' + arch) - - # Download if needed - if not os.path.exists(root_dir): - downloaded = download(url, os.path.join(location, slug), log = log) - if downloaded != root_dir: - raise ValueError('The location of mingw did not match\n%s\n%s' - % (downloaded, root_dir)) - - return root_dir - -def str2ver(string): - ''' - Converts a version string into a tuple - ''' - try: - version = tuple(int(v) for v in string.split('.')) - if len(version) is not 3: - raise ValueError() - except ValueError: - raise argparse.ArgumentTypeError( - 'please provide a 
three digit version string') - return version - -def main(): - ''' - Invoked when the script is run directly by the python interpreter - ''' - parser = argparse.ArgumentParser( - description = 'Downloads a specific version of MinGW', - formatter_class = argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument('--location', - help = 'the location to download the compiler to', - default = os.path.join(tempfile.gettempdir(), 'mingw-builds')) - parser.add_argument('--arch', required = True, choices = ['i686', 'x86_64'], - help = 'the target MinGW architecture string') - parser.add_argument('--version', type = str2ver, - help = 'the version of GCC to download') - parser.add_argument('--threading', choices = ['posix', 'win32'], - help = 'the threading type of the compiler') - parser.add_argument('--exceptions', choices = ['sjlj', 'seh', 'dwarf'], - help = 'the method to throw exceptions') - parser.add_argument('--revision', type=int, - help = 'the revision of the MinGW release') - group = parser.add_mutually_exclusive_group() - group.add_argument('-v', '--verbose', action='store_true', - help='increase the script output verbosity') - group.add_argument('-q', '--quiet', action='store_true', - help='only print errors and warning') - args = parser.parse_args() - - # Create the logger - logger = logging.getLogger('mingw') - handler = logging.StreamHandler() - formatter = logging.Formatter('%(message)s') - handler.setFormatter(formatter) - logger.addHandler(handler) - logger.setLevel(logging.INFO) - if args.quiet: - logger.setLevel(logging.WARN) - if args.verbose: - logger.setLevel(logging.DEBUG) - - # Get MinGW - root_dir = root(location = args.location, arch = args.arch, - version = args.version, threading = args.threading, - exceptions = args.exceptions, revision = args.revision, - log = logger) - - sys.stdout.write('%s\n' % os.path.join(root_dir, 'bin')) - -if __name__ == '__main__': - try: - main() - except IOError as e: - sys.stderr.write('IO error: %s\n' % e) - sys.exit(1) - except OSError as e: - sys.stderr.write('OS error: %s\n' % e) - sys.exit(1) - except KeyboardInterrupt as e: - sys.stderr.write('Killed\n') - sys.exit(1) diff --git a/benchmarks/thirdparty/benchmark/releasing.md b/benchmarks/thirdparty/benchmark/releasing.md deleted file mode 100755 index f0cd7010e3..0000000000 --- a/benchmarks/thirdparty/benchmark/releasing.md +++ /dev/null @@ -1,16 +0,0 @@ -# How to release - -* Make sure you're on master and synced to HEAD -* Ensure the project builds and tests run (sanity check only, obviously) - * `parallel -j0 exec ::: test/*_test` can help ensure everything at least - passes -* Prepare release notes - * `git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of - commits between the last annotated tag and HEAD - * Pick the most interesting. -* Create a release through github's interface - * Note this will create a lightweight tag. 
-  * Update this to an annotated tag:
-    * `git pull --tags`
-    * `git tag -a -f <tag> <tag>`
-    * `git push --force origin`
diff --git a/benchmarks/thirdparty/benchmark/src/CMakeLists.txt b/benchmarks/thirdparty/benchmark/src/CMakeLists.txt
deleted file mode 100755
index 701804ba0e..0000000000
--- a/benchmarks/thirdparty/benchmark/src/CMakeLists.txt
+++ /dev/null
@@ -1,105 +0,0 @@
-# Allow the source files to find headers in src/
-include_directories(${PROJECT_SOURCE_DIR}/src)
-
-if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
-  list(APPEND CMAKE_SHARED_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
-  list(APPEND CMAKE_MODULE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
-endif()
-
-file(GLOB
-  SOURCE_FILES
-    *.cc
-    ${PROJECT_SOURCE_DIR}/include/benchmark/*.h
-    ${CMAKE_CURRENT_SOURCE_DIR}/*.h)
-list(FILTER SOURCE_FILES EXCLUDE REGEX "benchmark_main\\.cc")
-
-add_library(benchmark ${SOURCE_FILES})
-set_target_properties(benchmark PROPERTIES
-  OUTPUT_NAME "benchmark"
-  VERSION ${GENERIC_LIB_VERSION}
-  SOVERSION ${GENERIC_LIB_SOVERSION}
-)
-target_include_directories(benchmark PUBLIC
-    $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
-    )
-
-# Link threads.
-target_link_libraries(benchmark ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
-find_library(LIBRT rt)
-if(LIBRT)
-  target_link_libraries(benchmark ${LIBRT})
-endif()
-
-# We need extra libraries on Windows
-if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
-  target_link_libraries(benchmark Shlwapi)
-endif()
-
-# We need extra libraries on Solaris
-if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS")
-  target_link_libraries(benchmark kstat)
-endif()
-
-# Benchmark main library
-add_library(benchmark_main "benchmark_main.cc")
-set_target_properties(benchmark_main PROPERTIES
-  OUTPUT_NAME "benchmark_main"
-  VERSION ${GENERIC_LIB_VERSION}
-  SOVERSION ${GENERIC_LIB_SOVERSION}
-)
-target_include_directories(benchmark PUBLIC
-    $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
-    )
-target_link_libraries(benchmark_main benchmark)
-
-set(include_install_dir "include")
-set(lib_install_dir "lib/")
-set(bin_install_dir "bin/")
-set(config_install_dir "lib/cmake/${PROJECT_NAME}")
-set(pkgconfig_install_dir "lib/pkgconfig")
-
-set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")
-
-set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake")
-set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake")
-set(pkg_config "${generated_dir}/${PROJECT_NAME}.pc")
-set(targets_export_name "${PROJECT_NAME}Targets")
-
-set(namespace "${PROJECT_NAME}::")
-
-include(CMakePackageConfigHelpers)
-write_basic_package_version_file(
-  "${version_config}" VERSION ${GIT_VERSION} COMPATIBILITY SameMajorVersion
-)
-
-configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY)
-configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark.pc.in" "${pkg_config}" @ONLY)
-
-if (BENCHMARK_ENABLE_INSTALL)
-  # Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable)
-  install(
-    TARGETS benchmark benchmark_main
-    EXPORT ${targets_export_name}
-    ARCHIVE DESTINATION ${lib_install_dir}
-    LIBRARY DESTINATION ${lib_install_dir}
-    RUNTIME DESTINATION ${bin_install_dir}
-    INCLUDES DESTINATION ${include_install_dir})
-
-  install(
-    DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark"
-    DESTINATION ${include_install_dir}
-    FILES_MATCHING PATTERN "*.*h")
-
-  install(
-    FILES "${project_config}" "${version_config}"
-    DESTINATION "${config_install_dir}")
-
-  install(
-    FILES "${pkg_config}"
-    DESTINATION "${pkgconfig_install_dir}")
-
-  install(
-    EXPORT "${targets_export_name}"
-    NAMESPACE "${namespace}"
-    DESTINATION "${config_install_dir}")
-endif()
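Editor's note: the benchmark_main target built above exists so that benchmark-only translation units need no entry point of their own. A hypothetical file linked against it:

// bm_noop.cc -- hypothetical translation unit. Linking it against the
// benchmark_main library supplies main(), so the file only defines and
// registers benchmarks; BENCHMARK_MAIN() is not needed here.
#include <benchmark/benchmark.h>

static void BM_Noop(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_Noop);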
diff --git a/benchmarks/thirdparty/benchmark/src/arraysize.h b/benchmarks/thirdparty/benchmark/src/arraysize.h
deleted file mode 100755
index 51a50f2dff..0000000000
--- a/benchmarks/thirdparty/benchmark/src/arraysize.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef BENCHMARK_ARRAYSIZE_H_
-#define BENCHMARK_ARRAYSIZE_H_
-
-#include "internal_macros.h"
-
-namespace benchmark {
-namespace internal {
-// The arraysize(arr) macro returns the # of elements in an array arr.
-// The expression is a compile-time constant, and therefore can be
-// used in defining new arrays, for example. If you use arraysize on
-// a pointer by mistake, you will get a compile-time error.
-//
-
-// This template function declaration is used in defining arraysize.
-// Note that the function doesn't need an implementation, as we only
-// use its type.
-template <typename T, size_t N>
-char (&ArraySizeHelper(T (&array)[N]))[N];
-
-// That gcc wants both of these prototypes seems mysterious. VC, for
-// its part, can't decide which to use (another mystery). Matching of
-// template overloads: the final frontier.
-#ifndef COMPILER_MSVC
-template <typename T, size_t N>
-char (&ArraySizeHelper(const T (&array)[N]))[N];
-#endif
-
-#define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array)))
-
-}  // end namespace internal
-}  // end namespace benchmark
-
-#endif  // BENCHMARK_ARRAYSIZE_H_
diff --git a/benchmarks/thirdparty/benchmark/src/benchmark.cc b/benchmarks/thirdparty/benchmark/src/benchmark.cc
deleted file mode 100755
index 82b15ac709..0000000000
--- a/benchmarks/thirdparty/benchmark/src/benchmark.cc
+++ /dev/null
@@ -1,630 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "benchmark/benchmark.h"
-#include "benchmark_api_internal.h"
-#include "internal_macros.h"
-
-#ifndef BENCHMARK_OS_WINDOWS
-#ifndef BENCHMARK_OS_FUCHSIA
-#include <sys/resource.h>
-#endif
-#include <sys/time.h>
-#include <unistd.h>
-#endif
-
-#include <algorithm>
-#include <atomic>
-#include <condition_variable>
-#include <cstdio>
-#include <cstdlib>
-#include <cstring>
-#include <fstream>
-#include <iostream>
-#include <memory>
-#include <thread>
-
-#include "check.h"
-#include "colorprint.h"
-#include "commandlineflags.h"
-#include "complexity.h"
-#include "counter.h"
-#include "internal_macros.h"
-#include "log.h"
-#include "mutex.h"
-#include "re.h"
-#include "statistics.h"
-#include "string_util.h"
-#include "thread_manager.h"
-#include "thread_timer.h"
-
-DEFINE_bool(benchmark_list_tests, false,
-            "Print a list of benchmarks. This option overrides all other "
-            "options.");
-
-DEFINE_string(benchmark_filter, ".",
-              "A regular expression that specifies the set of benchmarks "
-              "to execute. If this flag is empty, no benchmarks are run. "
-              "If this flag is the string \"all\", all benchmarks linked "
-              "into the process are run.");
-
-DEFINE_double(benchmark_min_time, 0.5,
-              "Minimum number of seconds we should run benchmark before "
-              "results are considered significant. For cpu-time based "
-              "tests, this is the lower bound on the total cpu time "
-              "used by all threads that make up the test. For real-time "
-              "based tests, this is the lower bound on the elapsed time "
-              "of the benchmark execution, regardless of number of "
-              "threads.");
-
-DEFINE_int32(benchmark_repetitions, 1,
-             "The number of runs of each benchmark. If greater than 1, the "
-             "mean and standard deviation of the runs will be reported.");
-
-DEFINE_bool(benchmark_report_aggregates_only, false,
-            "Report the result of each benchmark repetition. When 'true' is "
-            "specified only the mean, standard deviation, and other statistics "
-            "are reported for repeated benchmarks.");
-
-DEFINE_string(benchmark_format, "console",
-              "The format to use for console output. Valid values are "
-              "'console', 'json', or 'csv'.");
-
-DEFINE_string(benchmark_out_format, "json",
-              "The format to use for file output. Valid values are "
-              "'console', 'json', or 'csv'.");
-
-DEFINE_string(benchmark_out, "", "The file to write additional output to");
-
-DEFINE_string(benchmark_color, "auto",
-              "Whether to use colors in the output. Valid values: "
-              "'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use "
-              "colors if the output is being sent to a terminal and the TERM "
-              "environment variable is set to a terminal type that supports "
-              "colors.");
-
-DEFINE_bool(benchmark_counters_tabular, false,
-            "Whether to use tabular format when printing user counters to "
-            "the console. Valid values: 'true'/'yes'/1, 'false'/'no'/0. "
-            "Defaults to false.");
-
-DEFINE_int32(v, 0, "The level of verbose logging to output");
-
-namespace benchmark {
-
-namespace {
-static const size_t kMaxIterations = 1000000000;
-}  // end namespace
-
-namespace internal {
-
-void UseCharPointer(char const volatile*) {}
-
-namespace {
-
-BenchmarkReporter::Run CreateRunReport(
-    const benchmark::internal::Benchmark::Instance& b,
-    const internal::ThreadManager::Result& results,
-    double seconds) {
-  // Create report about this benchmark run.
-  BenchmarkReporter::Run report;
-
-  report.benchmark_name = b.name;
-  report.error_occurred = results.has_error_;
-  report.error_message = results.error_message_;
-  report.report_label = results.report_label_;
-  // This is the total iterations across all threads.
-  report.iterations = results.iterations;
-  report.time_unit = b.time_unit;
-
-  if (!report.error_occurred) {
-    double bytes_per_second = 0;
-    if (results.bytes_processed > 0 && seconds > 0.0) {
-      bytes_per_second = (results.bytes_processed / seconds);
-    }
-    double items_per_second = 0;
-    if (results.items_processed > 0 && seconds > 0.0) {
-      items_per_second = (results.items_processed / seconds);
-    }
-
-    if (b.use_manual_time) {
-      report.real_accumulated_time = results.manual_time_used;
-    } else {
-      report.real_accumulated_time = results.real_time_used;
-    }
-    report.cpu_accumulated_time = results.cpu_time_used;
-    report.bytes_per_second = bytes_per_second;
-    report.items_per_second = items_per_second;
-    report.complexity_n = results.complexity_n;
-    report.complexity = b.complexity;
-    report.complexity_lambda = b.complexity_lambda;
-    report.statistics = b.statistics;
-    report.counters = results.counters;
-    internal::Finish(&report.counters, seconds, b.threads);
-  }
-  return report;
-}
-
-// Execute one thread of benchmark b for the specified number of iterations.
-// Adds the stats collected for the thread into *total.
-void RunInThread(const benchmark::internal::Benchmark::Instance* b,
-                 size_t iters, int thread_id,
-                 internal::ThreadManager* manager) {
-  internal::ThreadTimer timer;
-  State st(iters, b->arg, thread_id, b->threads, &timer, manager);
-  b->benchmark->Run(st);
-  CHECK(st.iterations() >= st.max_iterations)
-      << "Benchmark returned before State::KeepRunning() returned false!";
-  {
-    MutexLock l(manager->GetBenchmarkMutex());
-    internal::ThreadManager::Result& results = manager->results;
-    results.iterations += st.iterations();
-    results.cpu_time_used += timer.cpu_time_used();
-    results.real_time_used += timer.real_time_used();
-    results.manual_time_used += timer.manual_time_used();
-    results.bytes_processed += st.bytes_processed();
-    results.items_processed += st.items_processed();
-    results.complexity_n += st.complexity_length_n();
-    internal::Increment(&results.counters, st.counters);
-  }
-  manager->NotifyThreadComplete();
-}
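A sketch of the user-side invariant that the CHECK above enforces (the error condition is a stand-in): a benchmark must not return until KeepRunning() has yielded false, and SkipWithError() zeroes the remaining budget so the next call exits the loop.

#include <benchmark/benchmark.h>

static void BM_WithErrorHandling(benchmark::State& state) {
  while (state.KeepRunning()) {
    bool resource_ok = true;  // stand-in for a real acquisition
    if (!resource_ok) {
      state.SkipWithError("resource unavailable");
      // no early return: fall through and let KeepRunning() terminate
    }
  }
}
BENCHMARK(BM_WithErrorHandling);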
- || ((results.real_time_used >= 5 * min_time) && !b.use_manual_time); - - if (should_report) { - BenchmarkReporter::Run report = CreateRunReport(b, results, seconds); - if (!report.error_occurred && b.complexity != oNone) - complexity_reports->push_back(report); - reports.push_back(report); - break; - } - - // See how much iterations should be increased by - // Note: Avoid division by zero with max(seconds, 1ns). - double multiplier = min_time * 1.4 / std::max(seconds, 1e-9); - // If our last run was at least 10% of FLAGS_benchmark_min_time then we - // use the multiplier directly. Otherwise we use at most 10 times - // expansion. - // NOTE: When the last run was at least 10% of the min time the max - // expansion should be 14x. - bool is_significant = (seconds / min_time) > 0.1; - multiplier = is_significant ? multiplier : std::min(10.0, multiplier); - if (multiplier <= 1.0) multiplier = 2.0; - double next_iters = std::max(multiplier * iters, iters + 1.0); - if (next_iters > kMaxIterations) { - next_iters = kMaxIterations; - } - VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n"; - iters = static_cast(next_iters + 0.5); - } - } - // Calculate additional statistics - auto stat_reports = ComputeStats(reports); - if ((b.complexity != oNone) && b.last_benchmark_instance) { - auto additional_run_stats = ComputeBigO(*complexity_reports); - stat_reports.insert(stat_reports.end(), additional_run_stats.begin(), - additional_run_stats.end()); - complexity_reports->clear(); - } - - if (report_aggregates_only) reports.clear(); - reports.insert(reports.end(), stat_reports.begin(), stat_reports.end()); - return reports; -} - -} // namespace -} // namespace internal - -State::State(size_t max_iters, const std::vector& ranges, int thread_i, - int n_threads, internal::ThreadTimer* timer, - internal::ThreadManager* manager) - : total_iterations_(0), - batch_leftover_(0), - max_iterations(max_iters), - started_(false), - finished_(false), - error_occurred_(false), - range_(ranges), - bytes_processed_(0), - items_processed_(0), - complexity_n_(0), - counters(), - thread_index(thread_i), - threads(n_threads), - timer_(timer), - manager_(manager) { - CHECK(max_iterations != 0) << "At least one iteration must be run"; - CHECK_LT(thread_index, threads) << "thread_index must be less than threads"; - - // Note: The use of offsetof below is technically undefined until C++17 - // because State is not a standard layout type. However, all compilers - // currently provide well-defined behavior as an extension (which is - // demonstrated since constexpr evaluation must diagnose all undefined - // behavior). However, GCC and Clang also warn about this use of offsetof, - // which must be suppressed. -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#endif - // Offset tests to ensure commonly accessed data is on the first cache line. 
- const int cache_line_size = 64; - static_assert(offsetof(State, error_occurred_) <= - (cache_line_size - sizeof(error_occurred_)), ""); -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif -} - -void State::PauseTiming() { - // Add in time accumulated so far - CHECK(started_ && !finished_ && !error_occurred_); - timer_->StopTimer(); -} - -void State::ResumeTiming() { - CHECK(started_ && !finished_ && !error_occurred_); - timer_->StartTimer(); -} - -void State::SkipWithError(const char* msg) { - CHECK(msg); - error_occurred_ = true; - { - MutexLock l(manager_->GetBenchmarkMutex()); - if (manager_->results.has_error_ == false) { - manager_->results.error_message_ = msg; - manager_->results.has_error_ = true; - } - } - total_iterations_ = 0; - if (timer_->running()) timer_->StopTimer(); -} - -void State::SetIterationTime(double seconds) { - timer_->SetIterationTime(seconds); -} - -void State::SetLabel(const char* label) { - MutexLock l(manager_->GetBenchmarkMutex()); - manager_->results.report_label_ = label; -} - -void State::StartKeepRunning() { - CHECK(!started_ && !finished_); - started_ = true; - total_iterations_ = error_occurred_ ? 0 : max_iterations; - manager_->StartStopBarrier(); - if (!error_occurred_) ResumeTiming(); -} - -void State::FinishKeepRunning() { - CHECK(started_ && (!finished_ || error_occurred_)); - if (!error_occurred_) { - PauseTiming(); - } - // Total iterations has now wrapped around past 0. Fix this. - total_iterations_ = 0; - finished_ = true; - manager_->StartStopBarrier(); -} - -namespace internal { -namespace { - -void RunBenchmarks(const std::vector& benchmarks, - BenchmarkReporter* console_reporter, - BenchmarkReporter* file_reporter) { - // Note the file_reporter can be null. - CHECK(console_reporter != nullptr); - - // Determine the width of the name field using a minimum width of 10. - bool has_repetitions = FLAGS_benchmark_repetitions > 1; - size_t name_field_width = 10; - size_t stat_field_width = 0; - for (const Benchmark::Instance& benchmark : benchmarks) { - name_field_width = - std::max(name_field_width, benchmark.name.size()); - has_repetitions |= benchmark.repetitions > 1; - - for(const auto& Stat : *benchmark.statistics) - stat_field_width = std::max(stat_field_width, Stat.name_.size()); - } - if (has_repetitions) name_field_width += 1 + stat_field_width; - - // Print header here - BenchmarkReporter::Context context; - context.name_field_width = name_field_width; - - // Keep track of running times of all instances of current benchmark - std::vector complexity_reports; - - // We flush streams after invoking reporter methods that write to them. This - // ensures users get timely updates even when streams are not line-buffered. 
- auto flushStreams = [](BenchmarkReporter* reporter) { - if (!reporter) return; - std::flush(reporter->GetOutputStream()); - std::flush(reporter->GetErrorStream()); - }; - - if (console_reporter->ReportContext(context) && - (!file_reporter || file_reporter->ReportContext(context))) { - flushStreams(console_reporter); - flushStreams(file_reporter); - for (const auto& benchmark : benchmarks) { - std::vector reports = - RunBenchmark(benchmark, &complexity_reports); - console_reporter->ReportRuns(reports); - if (file_reporter) file_reporter->ReportRuns(reports); - flushStreams(console_reporter); - flushStreams(file_reporter); - } - } - console_reporter->Finalize(); - if (file_reporter) file_reporter->Finalize(); - flushStreams(console_reporter); - flushStreams(file_reporter); -} - -std::unique_ptr CreateReporter( - std::string const& name, ConsoleReporter::OutputOptions output_opts) { - typedef std::unique_ptr PtrType; - if (name == "console") { - return PtrType(new ConsoleReporter(output_opts)); - } else if (name == "json") { - return PtrType(new JSONReporter); - } else if (name == "csv") { - return PtrType(new CSVReporter); - } else { - std::cerr << "Unexpected format: '" << name << "'\n"; - std::exit(1); - } -} - -} // end namespace - -bool IsZero(double n) { - return std::abs(n) < std::numeric_limits::epsilon(); -} - -ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) { - int output_opts = ConsoleReporter::OO_Defaults; - if ((FLAGS_benchmark_color == "auto" && IsColorTerminal()) || - IsTruthyFlagValue(FLAGS_benchmark_color)) { - output_opts |= ConsoleReporter::OO_Color; - } else { - output_opts &= ~ConsoleReporter::OO_Color; - } - if(force_no_color) { - output_opts &= ~ConsoleReporter::OO_Color; - } - if(FLAGS_benchmark_counters_tabular) { - output_opts |= ConsoleReporter::OO_Tabular; - } else { - output_opts &= ~ConsoleReporter::OO_Tabular; - } - return static_cast< ConsoleReporter::OutputOptions >(output_opts); -} - -} // end namespace internal - -size_t RunSpecifiedBenchmarks() { - return RunSpecifiedBenchmarks(nullptr, nullptr); -} - -size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter) { - return RunSpecifiedBenchmarks(console_reporter, nullptr); -} - -size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter, - BenchmarkReporter* file_reporter) { - std::string spec = FLAGS_benchmark_filter; - if (spec.empty() || spec == "all") - spec = "."; // Regexp that matches all benchmarks - - // Setup the reporters - std::ofstream output_file; - std::unique_ptr default_console_reporter; - std::unique_ptr default_file_reporter; - if (!console_reporter) { - default_console_reporter = internal::CreateReporter( - FLAGS_benchmark_format, internal::GetOutputOptions()); - console_reporter = default_console_reporter.get(); - } - auto& Out = console_reporter->GetOutputStream(); - auto& Err = console_reporter->GetErrorStream(); - - std::string const& fname = FLAGS_benchmark_out; - if (fname.empty() && file_reporter) { - Err << "A custom file reporter was provided but " - "--benchmark_out= was not specified." 
-        << std::endl;
-    std::exit(1);
-  }
-  if (!fname.empty()) {
-    output_file.open(fname);
-    if (!output_file.is_open()) {
-      Err << "invalid file name: '" << fname << "'" << std::endl;
-      std::exit(1);
-    }
-    if (!file_reporter) {
-      default_file_reporter = internal::CreateReporter(
-          FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
-      file_reporter = default_file_reporter.get();
-    }
-    file_reporter->SetOutputStream(&output_file);
-    file_reporter->SetErrorStream(&output_file);
-  }
-
-  std::vector<internal::Benchmark::Instance> benchmarks;
-  if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;
-
-  if (benchmarks.empty()) {
-    Err << "Failed to match any benchmarks against regex: " << spec << "\n";
-    return 0;
-  }
-
-  if (FLAGS_benchmark_list_tests) {
-    for (auto const& benchmark : benchmarks) Out << benchmark.name << "\n";
-  } else {
-    internal::RunBenchmarks(benchmarks, console_reporter, file_reporter);
-  }
-
-  return benchmarks.size();
-}
-
-namespace internal {
-
-void PrintUsageAndExit() {
-  fprintf(stdout,
-          "benchmark"
-          " [--benchmark_list_tests={true|false}]\n"
-          " [--benchmark_filter=<regex>]\n"
-          " [--benchmark_min_time=<min_time>]\n"
-          " [--benchmark_repetitions=<num_repetitions>]\n"
-          " [--benchmark_report_aggregates_only={true|false}]\n"
-          " [--benchmark_format=<console|json|csv>]\n"
-          " [--benchmark_out=<filename>]\n"
-          " [--benchmark_out_format=<json|console|csv>]\n"
-          " [--benchmark_color={auto|true|false}]\n"
-          " [--benchmark_counters_tabular={true|false}]\n"
-          " [--v=<verbosity>]\n");
-  exit(0);
-}
-
-void ParseCommandLineFlags(int* argc, char** argv) {
-  using namespace benchmark;
-  BenchmarkReporter::Context::executable_name = argv[0];
-  for (int i = 1; i < *argc; ++i) {
-    if (ParseBoolFlag(argv[i], "benchmark_list_tests",
-                      &FLAGS_benchmark_list_tests) ||
-        ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
-        ParseDoubleFlag(argv[i], "benchmark_min_time",
-                        &FLAGS_benchmark_min_time) ||
-        ParseInt32Flag(argv[i], "benchmark_repetitions",
-                       &FLAGS_benchmark_repetitions) ||
-        ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
-                      &FLAGS_benchmark_report_aggregates_only) ||
-        ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
-        ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
-        ParseStringFlag(argv[i], "benchmark_out_format",
-                        &FLAGS_benchmark_out_format) ||
-        ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
-        // "color_print" is the deprecated name for "benchmark_color".
-        // TODO: Remove this.
-        ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
-        ParseBoolFlag(argv[i], "benchmark_counters_tabular",
-                      &FLAGS_benchmark_counters_tabular) ||
-        ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
-      for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];
-
-      --(*argc);
-      --i;
-    } else if (IsFlag(argv[i], "help")) {
-      PrintUsageAndExit();
-    }
-  }
-  for (auto const* flag :
-       {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format})
-    if (*flag != "console" && *flag != "json" && *flag != "csv") {
-      PrintUsageAndExit();
-    }
-  if (FLAGS_benchmark_color.empty()) {
-    PrintUsageAndExit();
-  }
-}
-
-int InitializeStreams() {
-  static std::ios_base::Init init;
-  return 0;
-}
-
-}  // end namespace internal
-
-void Initialize(int* argc, char** argv) {
-  internal::ParseCommandLineFlags(argc, argv);
-  internal::LogLevel() = FLAGS_v;
-}
-
-bool ReportUnrecognizedArguments(int argc, char** argv) {
-  for (int i = 1; i < argc; ++i) {
-    fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], argv[i]);
-  }
-  return argc > 1;
-}
-
-}  // end namespace benchmark
diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_api_internal.h b/benchmarks/thirdparty/benchmark/src/benchmark_api_internal.h
deleted file mode 100755
index dd7a3ffe8c..0000000000
--- a/benchmarks/thirdparty/benchmark/src/benchmark_api_internal.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef BENCHMARK_API_INTERNAL_H
-#define BENCHMARK_API_INTERNAL_H
-
-#include "benchmark/benchmark.h"
-
-#include
-#include
-#include
-#include
-#include
-
-namespace benchmark {
-namespace internal {
-
-// Information kept per benchmark we may want to run
-struct Benchmark::Instance {
-  std::string name;
-  Benchmark* benchmark;
-  ReportMode report_mode;
-  std::vector<int64_t> arg;
-  TimeUnit time_unit;
-  int range_multiplier;
-  bool use_real_time;
-  bool use_manual_time;
-  BigO complexity;
-  BigOFunc* complexity_lambda;
-  UserCounters counters;
-  const std::vector<Statistics>* statistics;
-  bool last_benchmark_instance;
-  int repetitions;
-  double min_time;
-  size_t iterations;
-  int threads;  // Number of concurrent threads to use
-};
-
-bool FindBenchmarksInternal(const std::string& re,
-                            std::vector<Benchmark::Instance>* benchmarks,
-                            std::ostream* Err);
-
-bool IsZero(double n);
-
-ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false);
-
-}  // end namespace internal
-}  // end namespace benchmark
-
-#endif  // BENCHMARK_API_INTERNAL_H
diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_main.cc b/benchmarks/thirdparty/benchmark/src/benchmark_main.cc
deleted file mode 100755
index b3b2478314..0000000000
--- a/benchmarks/thirdparty/benchmark/src/benchmark_main.cc
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2018 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
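// Editor's note: the BENCHMARK_MAIN() macro used in the file below expands,
// roughly, to the three entry points defined above (a sketch, not the
// verbatim macro body):
//
//   int main(int argc, char** argv) {
//     benchmark::Initialize(&argc, argv);
//     if (benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
//     benchmark::RunSpecifiedBenchmarks();
//   }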
- -#include "benchmark/benchmark.h" - -BENCHMARK_MAIN(); diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_register.cc b/benchmarks/thirdparty/benchmark/src/benchmark_register.cc deleted file mode 100755 index dc6f935685..0000000000 --- a/benchmarks/thirdparty/benchmark/src/benchmark_register.cc +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "benchmark_register.h" - -#ifndef BENCHMARK_OS_WINDOWS -#ifndef BENCHMARK_OS_FUCHSIA -#include -#endif -#include -#include -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "benchmark/benchmark.h" -#include "benchmark_api_internal.h" -#include "check.h" -#include "commandlineflags.h" -#include "complexity.h" -#include "internal_macros.h" -#include "log.h" -#include "mutex.h" -#include "re.h" -#include "statistics.h" -#include "string_util.h" -#include "timers.h" - -namespace benchmark { - -namespace { -// For non-dense Range, intermediate values are powers of kRangeMultiplier. -static const int kRangeMultiplier = 8; -// The size of a benchmark family determines is the number of inputs to repeat -// the benchmark on. If this is "large" then warn the user during configuration. -static const size_t kMaxFamilySize = 100; -} // end namespace - -namespace internal { - -//=============================================================================// -// BenchmarkFamilies -//=============================================================================// - -// Class for managing registered benchmarks. Note that each registered -// benchmark identifies a family of related benchmarks to run. -class BenchmarkFamilies { - public: - static BenchmarkFamilies* GetInstance(); - - // Registers a benchmark family and returns the index assigned to it. - size_t AddBenchmark(std::unique_ptr family); - - // Clear all registered benchmark families. - void ClearBenchmarks(); - - // Extract the list of benchmark instances that match the specified - // regular expression. 
-  bool FindBenchmarks(std::string re,
-                      std::vector<Benchmark::Instance>* benchmarks,
-                      std::ostream* Err);
-
- private:
-  BenchmarkFamilies() {}
-
-  std::vector<std::unique_ptr<Benchmark>> families_;
-  Mutex mutex_;
-};
-
-BenchmarkFamilies* BenchmarkFamilies::GetInstance() {
-  static BenchmarkFamilies instance;
-  return &instance;
-}
-
-size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr<Benchmark> family) {
-  MutexLock l(mutex_);
-  size_t index = families_.size();
-  families_.push_back(std::move(family));
-  return index;
-}
-
-void BenchmarkFamilies::ClearBenchmarks() {
-  MutexLock l(mutex_);
-  families_.clear();
-  families_.shrink_to_fit();
-}
-
-bool BenchmarkFamilies::FindBenchmarks(
-    std::string spec, std::vector<Benchmark::Instance>* benchmarks,
-    std::ostream* ErrStream) {
-  CHECK(ErrStream);
-  auto& Err = *ErrStream;
-  // Make regular expression out of command-line flag
-  std::string error_msg;
-  Regex re;
-  bool isNegativeFilter = false;
-  if(spec[0] == '-') {
-    spec.replace(0, 1, "");
-    isNegativeFilter = true;
-  }
-  if (!re.Init(spec, &error_msg)) {
-    Err << "Could not compile benchmark re: " << error_msg << std::endl;
-    return false;
-  }
-
-  // Special list of thread counts to use when none are specified
-  const std::vector<int> one_thread = {1};
-
-  MutexLock l(mutex_);
-  for (std::unique_ptr<Benchmark>& family : families_) {
-    // Family was deleted or benchmark doesn't match
-    if (!family) continue;
-
-    if (family->ArgsCnt() == -1) {
-      family->Args({});
-    }
-    const std::vector<int>* thread_counts =
-        (family->thread_counts_.empty()
-             ? &one_thread
-             : &static_cast<const std::vector<int>&>(family->thread_counts_));
-    const size_t family_size = family->args_.size() * thread_counts->size();
-    // The benchmark will be run on at least 'family_size' different inputs.
-    // If 'family_size' is very large warn the user.
-    if (family_size > kMaxFamilySize) {
-      Err << "The number of inputs is very large. " << family->name_
-          << " will be repeated at least " << family_size << " times.\n";
-    }
-    // reserve in the special case the regex ".", since we know the final
-    // family size.
- if (spec == ".") benchmarks->reserve(family_size); - - for (auto const& args : family->args_) { - for (int num_threads : *thread_counts) { - Benchmark::Instance instance; - instance.name = family->name_; - instance.benchmark = family.get(); - instance.report_mode = family->report_mode_; - instance.arg = args; - instance.time_unit = family->time_unit_; - instance.range_multiplier = family->range_multiplier_; - instance.min_time = family->min_time_; - instance.iterations = family->iterations_; - instance.repetitions = family->repetitions_; - instance.use_real_time = family->use_real_time_; - instance.use_manual_time = family->use_manual_time_; - instance.complexity = family->complexity_; - instance.complexity_lambda = family->complexity_lambda_; - instance.statistics = &family->statistics_; - instance.threads = num_threads; - - // Add arguments to instance name - size_t arg_i = 0; - for (auto const& arg : args) { - instance.name += "/"; - - if (arg_i < family->arg_names_.size()) { - const auto& arg_name = family->arg_names_[arg_i]; - if (!arg_name.empty()) { - instance.name += - StrFormat("%s:", family->arg_names_[arg_i].c_str()); - } - } - - instance.name += StrFormat("%d", arg); - ++arg_i; - } - - if (!IsZero(family->min_time_)) - instance.name += StrFormat("/min_time:%0.3f", family->min_time_); - if (family->iterations_ != 0) - instance.name += StrFormat("/iterations:%d", family->iterations_); - if (family->repetitions_ != 0) - instance.name += StrFormat("/repeats:%d", family->repetitions_); - - if (family->use_manual_time_) { - instance.name += "/manual_time"; - } else if (family->use_real_time_) { - instance.name += "/real_time"; - } - - // Add the number of threads used to the name - if (!family->thread_counts_.empty()) { - instance.name += StrFormat("/threads:%d", instance.threads); - } - - if ((re.Match(instance.name) && !isNegativeFilter) || - (!re.Match(instance.name) && isNegativeFilter)) { - instance.last_benchmark_instance = (&args == &family->args_.back()); - benchmarks->push_back(std::move(instance)); - } - } - } - } - return true; -} - -Benchmark* RegisterBenchmarkInternal(Benchmark* bench) { - std::unique_ptr bench_ptr(bench); - BenchmarkFamilies* families = BenchmarkFamilies::GetInstance(); - families->AddBenchmark(std::move(bench_ptr)); - return bench; -} - -// FIXME: This function is a hack so that benchmark.cc can access -// `BenchmarkFamilies` -bool FindBenchmarksInternal(const std::string& re, - std::vector* benchmarks, - std::ostream* Err) { - return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err); -} - -//=============================================================================// -// Benchmark -//=============================================================================// - -Benchmark::Benchmark(const char* name) - : name_(name), - report_mode_(RM_Unspecified), - time_unit_(kNanosecond), - range_multiplier_(kRangeMultiplier), - min_time_(0), - iterations_(0), - repetitions_(0), - use_real_time_(false), - use_manual_time_(false), - complexity_(oNone), - complexity_lambda_(nullptr) { - ComputeStatistics("mean", StatisticsMean); - ComputeStatistics("median", StatisticsMedian); - ComputeStatistics("stddev", StatisticsStdDev); -} - -Benchmark::~Benchmark() {} - -Benchmark* Benchmark::Arg(int64_t x) { - CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); - args_.push_back({x}); - return this; -} - -Benchmark* Benchmark::Unit(TimeUnit unit) { - time_unit_ = unit; - return this; -} - -Benchmark* Benchmark::Range(int64_t start, int64_t limit) { - 
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
-  std::vector<int64_t> arglist;
-  AddRange(&arglist, start, limit, range_multiplier_);
-
-  for (int64_t i : arglist) {
-    args_.push_back({i});
-  }
-  return this;
-}
-
-Benchmark* Benchmark::Ranges(
-    const std::vector<std::pair<int64_t, int64_t>>& ranges) {
-  CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
-  std::vector<std::vector<int64_t>> arglists(ranges.size());
-  std::size_t total = 1;
-  for (std::size_t i = 0; i < ranges.size(); i++) {
-    AddRange(&arglists[i], ranges[i].first, ranges[i].second,
-             range_multiplier_);
-    total *= arglists[i].size();
-  }
-
-  std::vector<std::size_t> ctr(arglists.size(), 0);
-
-  for (std::size_t i = 0; i < total; i++) {
-    std::vector<int64_t> tmp;
-    tmp.reserve(arglists.size());
-
-    for (std::size_t j = 0; j < arglists.size(); j++) {
-      tmp.push_back(arglists[j].at(ctr[j]));
-    }
-
-    args_.push_back(std::move(tmp));
-
-    for (std::size_t j = 0; j < arglists.size(); j++) {
-      if (ctr[j] + 1 < arglists[j].size()) {
-        ++ctr[j];
-        break;
-      }
-      ctr[j] = 0;
-    }
-  }
-  return this;
-}
-
-Benchmark* Benchmark::ArgName(const std::string& name) {
-  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
-  arg_names_ = {name};
-  return this;
-}
-
-Benchmark* Benchmark::ArgNames(const std::vector<std::string>& names) {
-  CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(names.size()));
-  arg_names_ = names;
-  return this;
-}
-
-Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) {
-  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
-  CHECK_GE(start, 0);
-  CHECK_LE(start, limit);
-  for (int64_t arg = start; arg <= limit; arg += step) {
-    args_.push_back({arg});
-  }
-  return this;
-}
-
-Benchmark* Benchmark::Args(const std::vector<int64_t>& args) {
-  CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
-  args_.push_back(args);
-  return this;
-}
-
-Benchmark* Benchmark::Apply(void (*custom_arguments)(Benchmark* benchmark)) {
-  custom_arguments(this);
-  return this;
-}
-
-Benchmark* Benchmark::RangeMultiplier(int multiplier) {
-  CHECK(multiplier > 1);
-  range_multiplier_ = multiplier;
-  return this;
-}
-
-Benchmark* Benchmark::MinTime(double t) {
-  CHECK(t > 0.0);
-  CHECK(iterations_ == 0);
-  min_time_ = t;
-  return this;
-}
-
-Benchmark* Benchmark::Iterations(size_t n) {
-  CHECK(n > 0);
-  CHECK(IsZero(min_time_));
-  iterations_ = n;
-  return this;
-}
-
-Benchmark* Benchmark::Repetitions(int n) {
-  CHECK(n > 0);
-  repetitions_ = n;
-  return this;
-}
-
-Benchmark* Benchmark::ReportAggregatesOnly(bool value) {
-  report_mode_ = value ? RM_ReportAggregatesOnly : RM_Default;
-  return this;
-}
-
-Benchmark* Benchmark::UseRealTime() {
-  CHECK(!use_manual_time_)
-      << "Cannot set UseRealTime and UseManualTime simultaneously.";
-  use_real_time_ = true;
-  return this;
-}
-
-Benchmark* Benchmark::UseManualTime() {
-  CHECK(!use_real_time_)
-      << "Cannot set UseRealTime and UseManualTime simultaneously.";
-  use_manual_time_ = true;
-  return this;
-}
-
-Benchmark* Benchmark::Complexity(BigO complexity) {
-  complexity_ = complexity;
-  return this;
-}
-
-Benchmark* Benchmark::Complexity(BigOFunc* complexity) {
-  complexity_lambda_ = complexity;
-  complexity_ = oLambda;
-  return this;
-}
-
-Benchmark* Benchmark::ComputeStatistics(std::string name,
-                                        StatisticsFunc* statistics) {
-  statistics_.emplace_back(name, statistics);
-  return this;
-}
-
-Benchmark* Benchmark::Threads(int t) {
-  CHECK_GT(t, 0);
-  thread_counts_.push_back(t);
-  return this;
-}
-
-Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) {
-  CHECK_GT(min_threads, 0);
-  CHECK_GE(max_threads, min_threads);
-
-  AddRange(&thread_counts_, min_threads, max_threads, 2);
-  return this;
-}
-
-Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads,
-                                       int stride) {
-  CHECK_GT(min_threads, 0);
-  CHECK_GE(max_threads, min_threads);
-  CHECK_GE(stride, 1);
-
-  for (auto i = min_threads; i < max_threads; i += stride) {
-    thread_counts_.push_back(i);
-  }
-  thread_counts_.push_back(max_threads);
-  return this;
-}
-
-Benchmark* Benchmark::ThreadPerCpu() {
-  thread_counts_.push_back(CPUInfo::Get().num_cpus);
-  return this;
-}
-
-void Benchmark::SetName(const char* name) { name_ = name; }
-
-int Benchmark::ArgsCnt() const {
-  if (args_.empty()) {
-    if (arg_names_.empty()) return -1;
-    return static_cast<int>(arg_names_.size());
-  }
-  return static_cast<int>(args_.front().size());
-}
-
-//=============================================================================//
-// FunctionBenchmark
-//=============================================================================//
-
-void FunctionBenchmark::Run(State& st) { func_(st); }
-
-}  // end namespace internal
-
-void ClearRegisteredBenchmarks() {
-  internal::BenchmarkFamilies::GetInstance()->ClearBenchmarks();
-}
-
-}  // end namespace benchmark
diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_register.h b/benchmarks/thirdparty/benchmark/src/benchmark_register.h
deleted file mode 100755
index 0705e219f2..0000000000
--- a/benchmarks/thirdparty/benchmark/src/benchmark_register.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef BENCHMARK_REGISTER_H
-#define BENCHMARK_REGISTER_H
-
-#include <vector>
-
-#include "check.h"
-
-template <typename T>
-void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
-  CHECK_GE(lo, 0);
-  CHECK_GE(hi, lo);
-  CHECK_GE(mult, 2);
-
-  // Add "lo"
-  dst->push_back(lo);
-
-  static const T kmax = std::numeric_limits<T>::max();
-
-  // Now space out the benchmarks in multiples of "mult"
-  for (T i = 1; i < kmax / mult; i *= mult) {
-    if (i >= hi) break;
-    if (i > lo) {
-      dst->push_back(i);
-    }
-  }
-
-  // Add "hi" (if different from "lo")
-  if (hi != lo) {
-    dst->push_back(hi);
-  }
-}
-
-#endif  // BENCHMARK_REGISTER_H
diff --git a/benchmarks/thirdparty/benchmark/src/check.h b/benchmarks/thirdparty/benchmark/src/check.h
deleted file mode 100755
index 73bead2fb5..0000000000
--- a/benchmarks/thirdparty/benchmark/src/check.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef CHECK_H_
-#define CHECK_H_
-
-#include <cmath>
-#include <cstdlib>
-#include <ostream>
-
-#include "internal_macros.h"
-#include "log.h"
-
-namespace benchmark {
-namespace internal {
-
-typedef
void(AbortHandlerT)(); - -inline AbortHandlerT*& GetAbortHandler() { - static AbortHandlerT* handler = &std::abort; - return handler; -} - -BENCHMARK_NORETURN inline void CallAbortHandler() { - GetAbortHandler()(); - std::abort(); // fallback to enforce noreturn -} - -// CheckHandler is the class constructed by failing CHECK macros. CheckHandler -// will log information about the failures and abort when it is destructed. -class CheckHandler { - public: - CheckHandler(const char* check, const char* file, const char* func, int line) - : log_(GetErrorLogInstance()) { - log_ << file << ":" << line << ": " << func << ": Check `" << check - << "' failed. "; - } - - LogType& GetLog() { return log_; } - - BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) { - log_ << std::endl; - CallAbortHandler(); - } - - CheckHandler& operator=(const CheckHandler&) = delete; - CheckHandler(const CheckHandler&) = delete; - CheckHandler() = delete; - - private: - LogType& log_; -}; - -} // end namespace internal -} // end namespace benchmark - -// The CHECK macro returns a std::ostream object that can have extra information -// written to it. -#ifndef NDEBUG -#define CHECK(b) \ - (b ? ::benchmark::internal::GetNullLogInstance() \ - : ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \ - .GetLog()) -#else -#define CHECK(b) ::benchmark::internal::GetNullLogInstance() -#endif - -#define CHECK_EQ(a, b) CHECK((a) == (b)) -#define CHECK_NE(a, b) CHECK((a) != (b)) -#define CHECK_GE(a, b) CHECK((a) >= (b)) -#define CHECK_LE(a, b) CHECK((a) <= (b)) -#define CHECK_GT(a, b) CHECK((a) > (b)) -#define CHECK_LT(a, b) CHECK((a) < (b)) - -#define CHECK_FLOAT_EQ(a, b, eps) CHECK(std::fabs((a) - (b)) < (eps)) -#define CHECK_FLOAT_NE(a, b, eps) CHECK(std::fabs((a) - (b)) >= (eps)) -#define CHECK_FLOAT_GE(a, b, eps) CHECK((a) - (b) > -(eps)) -#define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps)) -#define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) > (eps)) -#define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) > (eps)) - -#endif // CHECK_H_ diff --git a/benchmarks/thirdparty/benchmark/src/colorprint.cc b/benchmarks/thirdparty/benchmark/src/colorprint.cc deleted file mode 100755 index 2dec4a8b28..0000000000 --- a/benchmarks/thirdparty/benchmark/src/colorprint.cc +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
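// Editor's sketch of typical use of the helpers defined in this file,
// assuming a color-capable terminal behind the stream (illustration only):
//
//   if (benchmark::IsColorTerminal())
//     benchmark::ColorPrintf(std::cout, benchmark::COLOR_GREEN,
//                            "%-20s ok\n", "BM_Example");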
- -#include "colorprint.h" - -#include -#include -#include -#include -#include -#include - -#include "check.h" -#include "internal_macros.h" - -#ifdef BENCHMARK_OS_WINDOWS -#include -#include -#else -#include -#endif // BENCHMARK_OS_WINDOWS - -namespace benchmark { -namespace { -#ifdef BENCHMARK_OS_WINDOWS -typedef WORD PlatformColorCode; -#else -typedef const char* PlatformColorCode; -#endif - -PlatformColorCode GetPlatformColorCode(LogColor color) { -#ifdef BENCHMARK_OS_WINDOWS - switch (color) { - case COLOR_RED: - return FOREGROUND_RED; - case COLOR_GREEN: - return FOREGROUND_GREEN; - case COLOR_YELLOW: - return FOREGROUND_RED | FOREGROUND_GREEN; - case COLOR_BLUE: - return FOREGROUND_BLUE; - case COLOR_MAGENTA: - return FOREGROUND_BLUE | FOREGROUND_RED; - case COLOR_CYAN: - return FOREGROUND_BLUE | FOREGROUND_GREEN; - case COLOR_WHITE: // fall through to default - default: - return 0; - } -#else - switch (color) { - case COLOR_RED: - return "1"; - case COLOR_GREEN: - return "2"; - case COLOR_YELLOW: - return "3"; - case COLOR_BLUE: - return "4"; - case COLOR_MAGENTA: - return "5"; - case COLOR_CYAN: - return "6"; - case COLOR_WHITE: - return "7"; - default: - return nullptr; - }; -#endif -} - -} // end namespace - -std::string FormatString(const char* msg, va_list args) { - // we might need a second shot at this, so pre-emptivly make a copy - va_list args_cp; - va_copy(args_cp, args); - - std::size_t size = 256; - char local_buff[256]; - auto ret = vsnprintf(local_buff, size, msg, args_cp); - - va_end(args_cp); - - // currently there is no error handling for failure, so this is hack. - CHECK(ret >= 0); - - if (ret == 0) // handle empty expansion - return {}; - else if (static_cast(ret) < size) - return local_buff; - else { - // we did not provide a long enough buffer on our first attempt. - size = (size_t)ret + 1; // + 1 for the null byte - std::unique_ptr buff(new char[size]); - ret = vsnprintf(buff.get(), size, msg, args); - CHECK(ret > 0 && ((size_t)ret) < size); - return buff.get(); - } -} - -std::string FormatString(const char* msg, ...) { - va_list args; - va_start(args, msg); - auto tmp = FormatString(msg, args); - va_end(args); - return tmp; -} - -void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...) { - va_list args; - va_start(args, fmt); - ColorPrintf(out, color, fmt, args); - va_end(args); -} - -void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, - va_list args) { -#ifdef BENCHMARK_OS_WINDOWS - ((void)out); // suppress unused warning - - const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE); - - // Gets the current text color. - CONSOLE_SCREEN_BUFFER_INFO buffer_info; - GetConsoleScreenBufferInfo(stdout_handle, &buffer_info); - const WORD old_color_attrs = buffer_info.wAttributes; - - // We need to flush the stream buffers into the console before each - // SetConsoleTextAttribute call lest it affect the text that is already - // printed but has not yet reached the console. - fflush(stdout); - SetConsoleTextAttribute(stdout_handle, - GetPlatformColorCode(color) | FOREGROUND_INTENSITY); - vprintf(fmt, args); - - fflush(stdout); - // Restores the text color. 
- SetConsoleTextAttribute(stdout_handle, old_color_attrs); -#else - const char* color_code = GetPlatformColorCode(color); - if (color_code) out << FormatString("\033[0;3%sm", color_code); - out << FormatString(fmt, args) << "\033[m"; -#endif -} - -bool IsColorTerminal() { -#if BENCHMARK_OS_WINDOWS - // On Windows the TERM variable is usually not set, but the - // console there does support colors. - return 0 != _isatty(_fileno(stdout)); -#else - // On non-Windows platforms, we rely on the TERM variable. This list of - // supported TERM values is copied from Google Test: - // . - const char* const SUPPORTED_TERM_VALUES[] = { - "xterm", "xterm-color", "xterm-256color", - "screen", "screen-256color", "tmux", - "tmux-256color", "rxvt-unicode", "rxvt-unicode-256color", - "linux", "cygwin", - }; - - const char* const term = getenv("TERM"); - - bool term_supports_color = false; - for (const char* candidate : SUPPORTED_TERM_VALUES) { - if (term && 0 == strcmp(term, candidate)) { - term_supports_color = true; - break; - } - } - - return 0 != isatty(fileno(stdout)) && term_supports_color; -#endif // BENCHMARK_OS_WINDOWS -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/colorprint.h b/benchmarks/thirdparty/benchmark/src/colorprint.h deleted file mode 100755 index 9f6fab9b34..0000000000 --- a/benchmarks/thirdparty/benchmark/src/colorprint.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef BENCHMARK_COLORPRINT_H_ -#define BENCHMARK_COLORPRINT_H_ - -#include -#include -#include - -namespace benchmark { -enum LogColor { - COLOR_DEFAULT, - COLOR_RED, - COLOR_GREEN, - COLOR_YELLOW, - COLOR_BLUE, - COLOR_MAGENTA, - COLOR_CYAN, - COLOR_WHITE -}; - -std::string FormatString(const char* msg, va_list args); -std::string FormatString(const char* msg, ...); - -void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, - va_list args); -void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...); - -// Returns true if stdout appears to be a terminal that supports colored -// output, false otherwise. -bool IsColorTerminal(); - -} // end namespace benchmark - -#endif // BENCHMARK_COLORPRINT_H_ diff --git a/benchmarks/thirdparty/benchmark/src/commandlineflags.cc b/benchmarks/thirdparty/benchmark/src/commandlineflags.cc deleted file mode 100755 index 2fc92517a3..0000000000 --- a/benchmarks/thirdparty/benchmark/src/commandlineflags.cc +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "commandlineflags.h" - -#include -#include -#include -#include -#include - -namespace benchmark { -// Parses 'str' for a 32-bit signed integer. If successful, writes -// the result to *value and returns true; otherwise leaves *value -// unchanged and returns false. -bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) { - // Parses the environment variable as a decimal integer. 
- char* end = nullptr; - const long long_value = strtol(str, &end, 10); // NOLINT - - // Has strtol() consumed all characters in the string? - if (*end != '\0') { - // No - an invalid character was encountered. - std::cerr << src_text << " is expected to be a 32-bit integer, " - << "but actually has value \"" << str << "\".\n"; - return false; - } - - // Is the parsed value in the range of an Int32? - const int32_t result = static_cast(long_value); - if (long_value == std::numeric_limits::max() || - long_value == std::numeric_limits::min() || - // The parsed value overflows as a long. (strtol() returns - // LONG_MAX or LONG_MIN when the input overflows.) - result != long_value - // The parsed value overflows as an Int32. - ) { - std::cerr << src_text << " is expected to be a 32-bit integer, " - << "but actually has value \"" << str << "\", " - << "which overflows.\n"; - return false; - } - - *value = result; - return true; -} - -// Parses 'str' for a double. If successful, writes the result to *value and -// returns true; otherwise leaves *value unchanged and returns false. -bool ParseDouble(const std::string& src_text, const char* str, double* value) { - // Parses the environment variable as a decimal integer. - char* end = nullptr; - const double double_value = strtod(str, &end); // NOLINT - - // Has strtol() consumed all characters in the string? - if (*end != '\0') { - // No - an invalid character was encountered. - std::cerr << src_text << " is expected to be a double, " - << "but actually has value \"" << str << "\".\n"; - return false; - } - - *value = double_value; - return true; -} - -// Returns the name of the environment variable corresponding to the -// given flag. For example, FlagToEnvVar("foo") will return -// "BENCHMARK_FOO" in the open-source version. -static std::string FlagToEnvVar(const char* flag) { - const std::string flag_str(flag); - - std::string env_var; - for (size_t i = 0; i != flag_str.length(); ++i) - env_var += static_cast(::toupper(flag_str.c_str()[i])); - - return "BENCHMARK_" + env_var; -} - -// Reads and returns the Boolean environment variable corresponding to -// the given flag; if it's not set, returns default_value. -// -// The value is considered true iff it's not "0". -bool BoolFromEnv(const char* flag, bool default_value) { - const std::string env_var = FlagToEnvVar(flag); - const char* const string_value = getenv(env_var.c_str()); - return string_value == nullptr ? default_value - : strcmp(string_value, "0") != 0; -} - -// Reads and returns a 32-bit integer stored in the environment -// variable corresponding to the given flag; if it isn't set or -// doesn't represent a valid 32-bit integer, returns default_value. -int32_t Int32FromEnv(const char* flag, int32_t default_value) { - const std::string env_var = FlagToEnvVar(flag); - const char* const string_value = getenv(env_var.c_str()); - if (string_value == nullptr) { - // The environment variable is not set. - return default_value; - } - - int32_t result = default_value; - if (!ParseInt32(std::string("Environment variable ") + env_var, string_value, - &result)) { - std::cout << "The default value " << default_value << " is used.\n"; - return default_value; - } - - return result; -} - -// Reads and returns the string environment variable corresponding to -// the given flag; if it's not set, returns default_value. 
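// Editor's note: with FlagToEnvVar() above, the lookup key is "BENCHMARK_"
// plus the upper-cased flag name, so for example (values hypothetical):
//
//   Int32FromEnv("v", 0);       // reads BENCHMARK_V
//   BoolFromEnv("foo", false);  // reads BENCHMARK_FOO; any value but "0" is true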
-const char* StringFromEnv(const char* flag, const char* default_value) { - const std::string env_var = FlagToEnvVar(flag); - const char* const value = getenv(env_var.c_str()); - return value == nullptr ? default_value : value; -} - -// Parses a string as a command line flag. The string should have -// the format "--flag=value". When def_optional is true, the "=value" -// part can be omitted. -// -// Returns the value of the flag, or nullptr if the parsing failed. -const char* ParseFlagValue(const char* str, const char* flag, - bool def_optional) { - // str and flag must not be nullptr. - if (str == nullptr || flag == nullptr) return nullptr; - - // The flag must start with "--". - const std::string flag_str = std::string("--") + std::string(flag); - const size_t flag_len = flag_str.length(); - if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr; - - // Skips the flag name. - const char* flag_end = str + flag_len; - - // When def_optional is true, it's OK to not have a "=value" part. - if (def_optional && (flag_end[0] == '\0')) return flag_end; - - // If def_optional is true and there are more characters after the - // flag name, or if def_optional is false, there must be a '=' after - // the flag name. - if (flag_end[0] != '=') return nullptr; - - // Returns the string after "=". - return flag_end + 1; -} - -bool ParseBoolFlag(const char* str, const char* flag, bool* value) { - // Gets the value of the flag as a string. - const char* const value_str = ParseFlagValue(str, flag, true); - - // Aborts if the parsing failed. - if (value_str == nullptr) return false; - - // Converts the string value to a bool. - *value = IsTruthyFlagValue(value_str); - return true; -} - -bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) { - // Gets the value of the flag as a string. - const char* const value_str = ParseFlagValue(str, flag, false); - - // Aborts if the parsing failed. - if (value_str == nullptr) return false; - - // Sets *value to the value of the flag. - return ParseInt32(std::string("The value of flag --") + flag, value_str, - value); -} - -bool ParseDoubleFlag(const char* str, const char* flag, double* value) { - // Gets the value of the flag as a string. - const char* const value_str = ParseFlagValue(str, flag, false); - - // Aborts if the parsing failed. - if (value_str == nullptr) return false; - - // Sets *value to the value of the flag. - return ParseDouble(std::string("The value of flag --") + flag, value_str, - value); -} - -bool ParseStringFlag(const char* str, const char* flag, std::string* value) { - // Gets the value of the flag as a string. - const char* const value_str = ParseFlagValue(str, flag, false); - - // Aborts if the parsing failed. 
- if (value_str == nullptr) return false; - - *value = value_str; - return true; -} - -bool IsFlag(const char* str, const char* flag) { - return (ParseFlagValue(str, flag, true) != nullptr); -} - -bool IsTruthyFlagValue(const std::string& value) { - if (value.empty()) return true; - char ch = value[0]; - return isalnum(ch) && - !(ch == '0' || ch == 'f' || ch == 'F' || ch == 'n' || ch == 'N'); -} -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/commandlineflags.h b/benchmarks/thirdparty/benchmark/src/commandlineflags.h deleted file mode 100755 index 945c9a9fc4..0000000000 --- a/benchmarks/thirdparty/benchmark/src/commandlineflags.h +++ /dev/null @@ -1,79 +0,0 @@ -#ifndef BENCHMARK_COMMANDLINEFLAGS_H_ -#define BENCHMARK_COMMANDLINEFLAGS_H_ - -#include -#include - -// Macro for referencing flags. -#define FLAG(name) FLAGS_##name - -// Macros for declaring flags. -#define DECLARE_bool(name) extern bool FLAG(name) -#define DECLARE_int32(name) extern int32_t FLAG(name) -#define DECLARE_int64(name) extern int64_t FLAG(name) -#define DECLARE_double(name) extern double FLAG(name) -#define DECLARE_string(name) extern std::string FLAG(name) - -// Macros for defining flags. -#define DEFINE_bool(name, default_val, doc) bool FLAG(name) = (default_val) -#define DEFINE_int32(name, default_val, doc) int32_t FLAG(name) = (default_val) -#define DEFINE_int64(name, default_val, doc) int64_t FLAG(name) = (default_val) -#define DEFINE_double(name, default_val, doc) double FLAG(name) = (default_val) -#define DEFINE_string(name, default_val, doc) \ - std::string FLAG(name) = (default_val) - -namespace benchmark { -// Parses 'str' for a 32-bit signed integer. If successful, writes the result -// to *value and returns true; otherwise leaves *value unchanged and returns -// false. -bool ParseInt32(const std::string& src_text, const char* str, int32_t* value); - -// Parses a bool/Int32/string from the environment variable -// corresponding to the given Google Test flag. -bool BoolFromEnv(const char* flag, bool default_val); -int32_t Int32FromEnv(const char* flag, int32_t default_val); -double DoubleFromEnv(const char* flag, double default_val); -const char* StringFromEnv(const char* flag, const char* default_val); - -// Parses a string for a bool flag, in the form of either -// "--flag=value" or "--flag". -// -// In the former case, the value is taken as true if it passes IsTruthyValue(). -// -// In the latter case, the value is taken as true. -// -// On success, stores the value of the flag in *value, and returns -// true. On failure, returns false without changing *value. -bool ParseBoolFlag(const char* str, const char* flag, bool* value); - -// Parses a string for an Int32 flag, in the form of -// "--flag=value". -// -// On success, stores the value of the flag in *value, and returns -// true. On failure, returns false without changing *value. -bool ParseInt32Flag(const char* str, const char* flag, int32_t* value); - -// Parses a string for a Double flag, in the form of -// "--flag=value". -// -// On success, stores the value of the flag in *value, and returns -// true. On failure, returns false without changing *value. -bool ParseDoubleFlag(const char* str, const char* flag, double* value); - -// Parses a string for a string flag, in the form of -// "--flag=value". -// -// On success, stores the value of the flag in *value, and returns -// true. On failure, returns false without changing *value. 
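// Editor's sketch of how these parsers are driven (cf. ParseCommandLineFlags
// earlier in this patch); the flag string and value are hypothetical:
//
//   int32_t reps = 1;
//   if (benchmark::ParseInt32Flag("--benchmark_repetitions=5",
//                                 "benchmark_repetitions", &reps)) {
//     // reps == 5
//   }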
-bool ParseStringFlag(const char* str, const char* flag, std::string* value); - -// Returns true if the string matches the flag. -bool IsFlag(const char* str, const char* flag); - -// Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or -// some non-alphanumeric character. As a special case, also returns true if -// value is the empty string. -bool IsTruthyFlagValue(const std::string& value); -} // end namespace benchmark - -#endif // BENCHMARK_COMMANDLINEFLAGS_H_ diff --git a/benchmarks/thirdparty/benchmark/src/complexity.cc b/benchmarks/thirdparty/benchmark/src/complexity.cc deleted file mode 100755 index 97bf6e09b3..0000000000 --- a/benchmarks/thirdparty/benchmark/src/complexity.cc +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2016 Ismael Jimenez Martinez. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Source project : https://github.com/ismaelJimenez/cpp.leastsq -// Adapted to be used with google benchmark - -#include "benchmark/benchmark.h" - -#include -#include -#include "check.h" -#include "complexity.h" - -namespace benchmark { - -// Internal function to calculate the different scalability forms -BigOFunc* FittingCurve(BigO complexity) { - switch (complexity) { - case oN: - return [](int64_t n) -> double { return static_cast(n); }; - case oNSquared: - return [](int64_t n) -> double { return std::pow(n, 2); }; - case oNCubed: - return [](int64_t n) -> double { return std::pow(n, 3); }; - case oLogN: - return [](int64_t n) { return log2(n); }; - case oNLogN: - return [](int64_t n) { return n * log2(n); }; - case o1: - default: - return [](int64_t) { return 1.0; }; - } -} - -// Function to return an string for the calculated complexity -std::string GetBigOString(BigO complexity) { - switch (complexity) { - case oN: - return "N"; - case oNSquared: - return "N^2"; - case oNCubed: - return "N^3"; - case oLogN: - return "lgN"; - case oNLogN: - return "NlgN"; - case o1: - return "(1)"; - default: - return "f(N)"; - } -} - -// Find the coefficient for the high-order term in the running time, by -// minimizing the sum of squares of relative error, for the fitting curve -// given by the lambda expression. -// - n : Vector containing the size of the benchmark tests. -// - time : Vector containing the times for the benchmark tests. -// - fitting_curve : lambda expression (e.g. [](int64_t n) {return n; };). 
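// Editor's note on the closed form implemented below: minimizing
// sum_i (time_i - coef * g(n_i))^2 over coef yields
//
//   coef = sum_i(time_i * g(n_i)) / sum_i(g(n_i)^2)
//
// i.e. sigma_time_gn / sigma_gn_squared in the code, and rms is the
// root-mean-square residual normalized by the mean of the observed times.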
-
-// For a deeper explanation on the algorithm logic, look at the README file at
-// http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
-
-LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
-                       const std::vector<double>& time,
-                       BigOFunc* fitting_curve) {
-  double sigma_gn = 0.0;
-  double sigma_gn_squared = 0.0;
-  double sigma_time = 0.0;
-  double sigma_time_gn = 0.0;
-
-  // Calculate least square fitting parameter
-  for (size_t i = 0; i < n.size(); ++i) {
-    double gn_i = fitting_curve(n[i]);
-    sigma_gn += gn_i;
-    sigma_gn_squared += gn_i * gn_i;
-    sigma_time += time[i];
-    sigma_time_gn += time[i] * gn_i;
-  }
-
-  LeastSq result;
-  result.complexity = oLambda;
-
-  // Calculate complexity.
-  result.coef = sigma_time_gn / sigma_gn_squared;
-
-  // Calculate RMS
-  double rms = 0.0;
-  for (size_t i = 0; i < n.size(); ++i) {
-    double fit = result.coef * fitting_curve(n[i]);
-    rms += pow((time[i] - fit), 2);
-  }
-
-  // Normalized RMS by the mean of the observed values
-  double mean = sigma_time / n.size();
-  result.rms = sqrt(rms / n.size()) / mean;
-
-  return result;
-}
-
-// Find the coefficient for the high-order term in the running time, by
-// minimizing the sum of squares of relative error.
-//   - n          : Vector containing the size of the benchmark tests.
-//   - time       : Vector containing the times for the benchmark tests.
-//   - complexity : If different than oAuto, the fitting curve will stick to
-//                  this one. If it is oAuto, the best fitting curve will be
-//                  calculated.
-LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
-                       const std::vector<double>& time, const BigO complexity) {
-  CHECK_EQ(n.size(), time.size());
-  CHECK_GE(n.size(), 2);  // Do not compute fitting curve if less than two
-                          // benchmark runs are given
-  CHECK_NE(complexity, oNone);
-
-  LeastSq best_fit;
-
-  if (complexity == oAuto) {
-    std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed};
-
-    // Take o1 as default best fitting curve
-    best_fit = MinimalLeastSq(n, time, FittingCurve(o1));
-    best_fit.complexity = o1;
-
-    // Compute all possible fitting curves and stick to the best one
-    for (const auto& fit : fit_curves) {
-      LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit));
-      if (current_fit.rms < best_fit.rms) {
-        best_fit = current_fit;
-        best_fit.complexity = fit;
-      }
-    }
-  } else {
-    best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
-    best_fit.complexity = complexity;
-  }
-
-  return best_fit;
-}
-
-std::vector<BenchmarkReporter::Run> ComputeBigO(
-    const std::vector<BenchmarkReporter::Run>& reports) {
-  typedef BenchmarkReporter::Run Run;
-  std::vector<Run> results;
-
-  if (reports.size() < 2) return results;
-
-  // Accumulators.
-  std::vector<int64_t> n;
-  std::vector<double> real_time;
-  std::vector<double> cpu_time;
-
-  // Populate the accumulators.
- for (const Run& run : reports) { - CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?"; - n.push_back(run.complexity_n); - real_time.push_back(run.real_accumulated_time / run.iterations); - cpu_time.push_back(run.cpu_accumulated_time / run.iterations); - } - - LeastSq result_cpu; - LeastSq result_real; - - if (reports[0].complexity == oLambda) { - result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda); - result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda); - } else { - result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity); - result_real = MinimalLeastSq(n, real_time, result_cpu.complexity); - } - std::string benchmark_name = - reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/')); - - // Get the data from the accumulator to BenchmarkReporter::Run's. - Run big_o; - big_o.benchmark_name = benchmark_name + "_BigO"; - big_o.iterations = 0; - big_o.real_accumulated_time = result_real.coef; - big_o.cpu_accumulated_time = result_cpu.coef; - big_o.report_big_o = true; - big_o.complexity = result_cpu.complexity; - - // All the time results are reported after being multiplied by the - // time unit multiplier. But since RMS is a relative quantity it - // should not be multiplied at all. So, here, we _divide_ it by the - // multiplier so that when it is multiplied later the result is the - // correct one. - double multiplier = GetTimeUnitMultiplier(reports[0].time_unit); - - // Only add label to mean/stddev if it is same for all runs - Run rms; - big_o.report_label = reports[0].report_label; - rms.benchmark_name = benchmark_name + "_RMS"; - rms.report_label = big_o.report_label; - rms.iterations = 0; - rms.real_accumulated_time = result_real.rms / multiplier; - rms.cpu_accumulated_time = result_cpu.rms / multiplier; - rms.report_rms = true; - rms.complexity = result_cpu.complexity; - // don't forget to keep the time unit, or we won't be able to - // recover the correct value. - rms.time_unit = reports[0].time_unit; - - results.push_back(big_o); - results.push_back(rms); - return results; -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/complexity.h b/benchmarks/thirdparty/benchmark/src/complexity.h deleted file mode 100755 index df29b48d29..0000000000 --- a/benchmarks/thirdparty/benchmark/src/complexity.h +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2016 Ismael Jimenez Martinez. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Source project : https://github.com/ismaelJimenez/cpp.leastsq -// Adapted to be used with google benchmark - -#ifndef COMPLEXITY_H_ -#define COMPLEXITY_H_ - -#include -#include - -#include "benchmark/benchmark.h" - -namespace benchmark { - -// Return a vector containing the bigO and RMS information for the specified -// list of reports. If 'reports.size() < 2' an empty vector is returned. 
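// Editor's sketch: reports suitable for this fit come from a benchmark
// registered over a size range with a complexity target, e.g.:
//
//   BENCHMARK(BM_Example)->RangeMultiplier(2)->Range(1 << 10, 1 << 18)
//                        ->Complexity(benchmark::oN);
//
// where the body calls st.SetComplexityN(n) each run (cf. the CHECK in
// complexity.cc above).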
-std::vector<BenchmarkReporter::Run> ComputeBigO(
-    const std::vector<BenchmarkReporter::Run>& reports);
-
-// This data structure will contain the result returned by MinimalLeastSq
-//   - coef        : Estimated coefficient for the high-order term as
-//                   interpolated from data.
-//   - rms         : Normalized Root Mean Squared Error.
-//   - complexity  : Scalability form (e.g. oN, oNLogN). In case a scalability
-//                   form has been provided to MinimalLeastSq this will return
-//                   the same value. In case BigO::oAuto has been selected, this
-//                   parameter will return the best fitting curve detected.
-
-struct LeastSq {
-  LeastSq() : coef(0.0), rms(0.0), complexity(oNone) {}
-
-  double coef;
-  double rms;
-  BigO complexity;
-};
-
-// Function to return a string for the calculated complexity
-std::string GetBigOString(BigO complexity);
-
-}  // end namespace benchmark
-
-#endif  // COMPLEXITY_H_
diff --git a/benchmarks/thirdparty/benchmark/src/console_reporter.cc b/benchmarks/thirdparty/benchmark/src/console_reporter.cc
deleted file mode 100755
index 48920ca782..0000000000
--- a/benchmarks/thirdparty/benchmark/src/console_reporter.cc
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "benchmark/benchmark.h"
-#include "complexity.h"
-#include "counter.h"
-
-#include <algorithm>
-#include <cstdint>
-#include <cstdio>
-#include <iostream>
-#include <string>
-#include <tuple>
-#include <vector>
-
-#include "check.h"
-#include "colorprint.h"
-#include "commandlineflags.h"
-#include "internal_macros.h"
-#include "string_util.h"
-#include "timers.h"
-
-namespace benchmark {
-
-bool ConsoleReporter::ReportContext(const Context& context) {
-  name_field_width_ = context.name_field_width;
-  printed_header_ = false;
-  prev_counters_.clear();
-
-  PrintBasicContext(&GetErrorStream(), context);
-
-#ifdef BENCHMARK_OS_WINDOWS
-  if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) {
-    GetErrorStream()
-        << "Color printing is only supported for stdout on windows."
- " Disabling color printing\n"; - output_options_ = static_cast< OutputOptions >(output_options_ & ~OO_Color); - } -#endif - - return true; -} - -void ConsoleReporter::PrintHeader(const Run& run) { - std::string str = FormatString("%-*s %13s %13s %10s", static_cast(name_field_width_), - "Benchmark", "Time", "CPU", "Iterations"); - if(!run.counters.empty()) { - if(output_options_ & OO_Tabular) { - for(auto const& c : run.counters) { - str += FormatString(" %10s", c.first.c_str()); - } - } else { - str += " UserCounters..."; - } - } - str += "\n"; - std::string line = std::string(str.length(), '-'); - GetOutputStream() << line << "\n" << str << line << "\n"; -} - -void ConsoleReporter::ReportRuns(const std::vector& reports) { - for (const auto& run : reports) { - // print the header: - // --- if none was printed yet - bool print_header = !printed_header_; - // --- or if the format is tabular and this run - // has different fields from the prev header - print_header |= (output_options_ & OO_Tabular) && - (!internal::SameNames(run.counters, prev_counters_)); - if (print_header) { - printed_header_ = true; - prev_counters_ = run.counters; - PrintHeader(run); - } - // As an alternative to printing the headers like this, we could sort - // the benchmarks by header and then print. But this would require - // waiting for the full results before printing, or printing twice. - PrintRunData(run); - } -} - -static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt, - ...) { - va_list args; - va_start(args, fmt); - out << FormatString(fmt, args); - va_end(args); -} - -void ConsoleReporter::PrintRunData(const Run& result) { - typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...); - auto& Out = GetOutputStream(); - PrinterFn* printer = (output_options_ & OO_Color) ? - (PrinterFn*)ColorPrintf : IgnoreColorPrint; - auto name_color = - (result.report_big_o || result.report_rms) ? 
COLOR_BLUE : COLOR_GREEN; - printer(Out, name_color, "%-*s ", name_field_width_, - result.benchmark_name.c_str()); - - if (result.error_occurred) { - printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'", - result.error_message.c_str()); - printer(Out, COLOR_DEFAULT, "\n"); - return; - } - // Format bytes per second - std::string rate; - if (result.bytes_per_second > 0) { - rate = StrCat(" ", HumanReadableNumber(result.bytes_per_second), "B/s"); - } - - // Format items per second - std::string items; - if (result.items_per_second > 0) { - items = - StrCat(" ", HumanReadableNumber(result.items_per_second), " items/s"); - } - - const double real_time = result.GetAdjustedRealTime(); - const double cpu_time = result.GetAdjustedCPUTime(); - - if (result.report_big_o) { - std::string big_o = GetBigOString(result.complexity); - printer(Out, COLOR_YELLOW, "%10.2f %s %10.2f %s ", real_time, big_o.c_str(), - cpu_time, big_o.c_str()); - } else if (result.report_rms) { - printer(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ", real_time * 100, - cpu_time * 100); - } else { - const char* timeLabel = GetTimeUnitString(result.time_unit); - printer(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ", real_time, timeLabel, - cpu_time, timeLabel); - } - - if (!result.report_big_o && !result.report_rms) { - printer(Out, COLOR_CYAN, "%10lld", result.iterations); - } - - for (auto& c : result.counters) { - const std::size_t cNameLen = std::max(std::string::size_type(10), - c.first.length()); - auto const& s = HumanReadableNumber(c.second.value, 1000); - if (output_options_ & OO_Tabular) { - if (c.second.flags & Counter::kIsRate) { - printer(Out, COLOR_DEFAULT, " %*s/s", cNameLen - 2, s.c_str()); - } else { - printer(Out, COLOR_DEFAULT, " %*s", cNameLen, s.c_str()); - } - } else { - const char* unit = (c.second.flags & Counter::kIsRate) ? "/s" : ""; - printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), - unit); - } - } - - if (!rate.empty()) { - printer(Out, COLOR_DEFAULT, " %*s", 13, rate.c_str()); - } - - if (!items.empty()) { - printer(Out, COLOR_DEFAULT, " %*s", 18, items.c_str()); - } - - if (!result.report_label.empty()) { - printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str()); - } - - printer(Out, COLOR_DEFAULT, "\n"); -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/counter.cc b/benchmarks/thirdparty/benchmark/src/counter.cc deleted file mode 100755 index ed1aa044ee..0000000000 --- a/benchmarks/thirdparty/benchmark/src/counter.cc +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
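(Editorial aside on PrintRunData in the console reporter above: color and monochrome output share one variadic signature, so a function pointer chosen once routes every subsequent call unchanged. A standalone sketch of the pattern, with invented names:)

#include <cstdarg>
#include <cstdio>

static void PlainPrint(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  vprintf(fmt, args);
  va_end(args);
}

static void ColorPrint(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  printf("\x1b[32m");  // ANSI green, purely illustrative
  vprintf(fmt, args);
  printf("\x1b[0m");
  va_end(args);
}

int main() {
  const bool use_color = false;  // would come from the output options
  void (*printer)(const char*, ...) = use_color ? ColorPrint : PlainPrint;
  printer("%-20s %10.0f ns\n", "BM_Example", 42.0);
  return 0;
}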
- -#include "counter.h" - -namespace benchmark { -namespace internal { - -double Finish(Counter const& c, double cpu_time, double num_threads) { - double v = c.value; - if (c.flags & Counter::kIsRate) { - v /= cpu_time; - } - if (c.flags & Counter::kAvgThreads) { - v /= num_threads; - } - return v; -} - -void Finish(UserCounters *l, double cpu_time, double num_threads) { - for (auto &c : *l) { - c.second.value = Finish(c.second, cpu_time, num_threads); - } -} - -void Increment(UserCounters *l, UserCounters const& r) { - // add counters present in both or just in *l - for (auto &c : *l) { - auto it = r.find(c.first); - if (it != r.end()) { - c.second.value = c.second + it->second; - } - } - // add counters present in r, but not in *l - for (auto const &tc : r) { - auto it = l->find(tc.first); - if (it == l->end()) { - (*l)[tc.first] = tc.second; - } - } -} - -bool SameNames(UserCounters const& l, UserCounters const& r) { - if (&l == &r) return true; - if (l.size() != r.size()) { - return false; - } - for (auto const& c : l) { - if (r.find(c.first) == r.end()) { - return false; - } - } - return true; -} - -} // end namespace internal -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/counter.h b/benchmarks/thirdparty/benchmark/src/counter.h deleted file mode 100755 index dd6865a31d..0000000000 --- a/benchmarks/thirdparty/benchmark/src/counter.h +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "benchmark/benchmark.h" - -namespace benchmark { - -// these counter-related functions are hidden to reduce API surface. -namespace internal { -void Finish(UserCounters *l, double time, double num_threads); -void Increment(UserCounters *l, UserCounters const& r); -bool SameNames(UserCounters const& l, UserCounters const& r); -} // end namespace internal - -} //end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/csv_reporter.cc b/benchmarks/thirdparty/benchmark/src/csv_reporter.cc deleted file mode 100755 index 35510645b0..0000000000 --- a/benchmarks/thirdparty/benchmark/src/csv_reporter.cc +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
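(Editorial aside before leaving counter.cc above: Finish() applies at most two flag-driven transforms to a raw counter value. A worked example with invented numbers, mirroring that arithmetic:)

#include <cassert>

int main() {
  // A counter that accumulated 1000 units over 2.0s of CPU time on 4 threads.
  double value = 1000.0;
  const double cpu_time = 2.0;
  const double num_threads = 4.0;
  value /= cpu_time;     // Counter::kIsRate     -> 500 units per second
  value /= num_threads;  // Counter::kAvgThreads -> 125 units per second per thread
  assert(value == 125.0);
  return 0;
}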
- -#include "benchmark/benchmark.h" -#include "complexity.h" - -#include -#include -#include -#include -#include -#include - -#include "string_util.h" -#include "timers.h" -#include "check.h" - -// File format reference: http://edoceo.com/utilitas/csv-file-format. - -namespace benchmark { - -namespace { -std::vector elements = { - "name", "iterations", "real_time", "cpu_time", - "time_unit", "bytes_per_second", "items_per_second", "label", - "error_occurred", "error_message"}; -} // namespace - -bool CSVReporter::ReportContext(const Context& context) { - PrintBasicContext(&GetErrorStream(), context); - return true; -} - -void CSVReporter::ReportRuns(const std::vector & reports) { - std::ostream& Out = GetOutputStream(); - - if (!printed_header_) { - // save the names of all the user counters - for (const auto& run : reports) { - for (const auto& cnt : run.counters) { - user_counter_names_.insert(cnt.first); - } - } - - // print the header - for (auto B = elements.begin(); B != elements.end();) { - Out << *B++; - if (B != elements.end()) Out << ","; - } - for (auto B = user_counter_names_.begin(); B != user_counter_names_.end();) { - Out << ",\"" << *B++ << "\""; - } - Out << "\n"; - - printed_header_ = true; - } else { - // check that all the current counters are saved in the name set - for (const auto& run : reports) { - for (const auto& cnt : run.counters) { - CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end()) - << "All counters must be present in each run. " - << "Counter named \"" << cnt.first - << "\" was not in a run after being added to the header"; - } - } - } - - // print results for each run - for (const auto& run : reports) { - PrintRunData(run); - } - -} - -void CSVReporter::PrintRunData(const Run & run) { - std::ostream& Out = GetOutputStream(); - - // Field with embedded double-quote characters must be doubled and the field - // delimited with double-quotes. - std::string name = run.benchmark_name; - ReplaceAll(&name, "\"", "\"\""); - Out << '"' << name << "\","; - if (run.error_occurred) { - Out << std::string(elements.size() - 3, ','); - Out << "true,"; - std::string msg = run.error_message; - ReplaceAll(&msg, "\"", "\"\""); - Out << '"' << msg << "\"\n"; - return; - } - - // Do not print iteration on bigO and RMS report - if (!run.report_big_o && !run.report_rms) { - Out << run.iterations; - } - Out << ","; - - Out << run.GetAdjustedRealTime() << ","; - Out << run.GetAdjustedCPUTime() << ","; - - // Do not print timeLabel on bigO and RMS report - if (run.report_big_o) { - Out << GetBigOString(run.complexity); - } else if (!run.report_rms) { - Out << GetTimeUnitString(run.time_unit); - } - Out << ","; - - if (run.bytes_per_second > 0.0) { - Out << run.bytes_per_second; - } - Out << ","; - if (run.items_per_second > 0.0) { - Out << run.items_per_second; - } - Out << ","; - if (!run.report_label.empty()) { - // Field with embedded double-quote characters must be doubled and the field - // delimited with double-quotes. 
- std::string label = run.report_label; - ReplaceAll(&label, "\"", "\"\""); - Out << "\"" << label << "\""; - } - Out << ",,"; // for error_occurred and error_message - - // Print user counters - for (const auto &ucn : user_counter_names_) { - auto it = run.counters.find(ucn); - if(it == run.counters.end()) { - Out << ","; - } else { - Out << "," << it->second; - } - } - Out << '\n'; -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/cycleclock.h b/benchmarks/thirdparty/benchmark/src/cycleclock.h deleted file mode 100755 index 3b376ac57d..0000000000 --- a/benchmarks/thirdparty/benchmark/src/cycleclock.h +++ /dev/null @@ -1,177 +0,0 @@ -// ---------------------------------------------------------------------- -// CycleClock -// A CycleClock tells you the current time in Cycles. The "time" -// is actually time since power-on. This is like time() but doesn't -// involve a system call and is much more precise. -// -// NOTE: Not all cpu/platform/kernel combinations guarantee that this -// clock increments at a constant rate or is synchronized across all logical -// cpus in a system. -// -// If you need the above guarantees, please consider using a different -// API. There are efforts to provide an interface which provides a millisecond -// granularity and implemented as a memory read. A memory read is generally -// cheaper than the CycleClock for many architectures. -// -// Also, in some out of order CPU implementations, the CycleClock is not -// serializing. So if you're trying to count at cycles granularity, your -// data might be inaccurate due to out of order instruction execution. -// ---------------------------------------------------------------------- - -#ifndef BENCHMARK_CYCLECLOCK_H_ -#define BENCHMARK_CYCLECLOCK_H_ - -#include - -#include "benchmark/benchmark.h" -#include "internal_macros.h" - -#if defined(BENCHMARK_OS_MACOSX) -#include -#endif -// For MSVC, we want to use '_asm rdtsc' when possible (since it works -// with even ancient MSVC compilers), and when not possible the -// __rdtsc intrinsic, declared in . Unfortunately, in some -// environments, and have conflicting -// declarations of some other intrinsics, breaking compilation. -// Therefore, we simply declare __rdtsc ourselves. See also -// http://connect.microsoft.com/VisualStudio/feedback/details/262047 -#if defined(COMPILER_MSVC) && !defined(_M_IX86) -extern "C" uint64_t __rdtsc(); -#pragma intrinsic(__rdtsc) -#endif - -#ifndef BENCHMARK_OS_WINDOWS -#include -#include -#endif - -#ifdef BENCHMARK_OS_EMSCRIPTEN -#include -#endif - -namespace benchmark { -// NOTE: only i386 and x86_64 have been well tested. -// PPC, sparc, alpha, and ia64 are based on -// http://peter.kuscsik.com/wordpress/?p=14 -// with modifications by m3b. See also -// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h -namespace cycleclock { -// This should return the number of cycles since power-on. Thread-safe. -inline BENCHMARK_ALWAYS_INLINE int64_t Now() { -#if defined(BENCHMARK_OS_MACOSX) - // this goes at the top because we need ALL Macs, regardless of - // architecture, to return the number of "mach time units" that - // have passed since startup. See sysinfo.cc where - // InitializeSystemInfo() sets the supposed cpu clock frequency of - // macs to the number of mach time units per second, not actual - // CPU clock frequency (which can change in the face of CPU - // frequency scaling). 
Also note that when the Mac sleeps, this - // counter pauses; it does not continue counting, nor does it - // reset to zero. - return mach_absolute_time(); -#elif defined(BENCHMARK_OS_EMSCRIPTEN) - // this goes above x86-specific code because old versions of Emscripten - // define __x86_64__, although they have nothing to do with it. - return static_cast(emscripten_get_now() * 1e+6); -#elif defined(__i386__) - int64_t ret; - __asm__ volatile("rdtsc" : "=A"(ret)); - return ret; -#elif defined(__x86_64__) || defined(__amd64__) - uint64_t low, high; - __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); - return (high << 32) | low; -#elif defined(__powerpc__) || defined(__ppc__) - // This returns a time-base, which is not always precisely a cycle-count. - int64_t tbl, tbu0, tbu1; - asm("mftbu %0" : "=r"(tbu0)); - asm("mftb %0" : "=r"(tbl)); - asm("mftbu %0" : "=r"(tbu1)); - tbl &= -static_cast(tbu0 == tbu1); - // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is garbage) - return (tbu1 << 32) | tbl; -#elif defined(__sparc__) - int64_t tick; - asm(".byte 0x83, 0x41, 0x00, 0x00"); - asm("mov %%g1, %0" : "=r"(tick)); - return tick; -#elif defined(__ia64__) - int64_t itc; - asm("mov %0 = ar.itc" : "=r"(itc)); - return itc; -#elif defined(COMPILER_MSVC) && defined(_M_IX86) - // Older MSVC compilers (like 7.x) don't seem to support the - // __rdtsc intrinsic properly, so I prefer to use _asm instead - // when I know it will work. Otherwise, I'll use __rdtsc and hope - // the code is being compiled with a non-ancient compiler. - _asm rdtsc -#elif defined(COMPILER_MSVC) - return __rdtsc(); -#elif defined(BENCHMARK_OS_NACL) - // Native Client validator on x86/x86-64 allows RDTSC instructions, - // and this case is handled above. Native Client validator on ARM - // rejects MRC instructions (used in the ARM-specific sequence below), - // so we handle it here. Portable Native Client compiles to - // architecture-agnostic bytecode, which doesn't provide any - // cycle counter access mnemonics. - - // Native Client does not provide any API to access cycle counter. - // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday - // because is provides nanosecond resolution (which is noticable at - // least for PNaCl modules running on x86 Mac & Linux). - // Initialize to always return 0 if clock_gettime fails. - struct timespec ts = { 0, 0 }; - clock_gettime(CLOCK_MONOTONIC, &ts); - return static_cast(ts.tv_sec) * 1000000000 + ts.tv_nsec; -#elif defined(__aarch64__) - // System timer of ARMv8 runs at a different frequency than the CPU's. - // The frequency is fixed, typically in the range 1-50MHz. It can be - // read at CNTFRQ special register. We assume the OS has set up - // the virtual timer properly. - int64_t virtual_timer_value; - asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value)); - return virtual_timer_value; -#elif defined(__ARM_ARCH) - // V6 is the earliest arch that has a standard cyclecount - // Native Client validator doesn't allow MRC instructions. -#if (__ARM_ARCH >= 6) - uint32_t pmccntr; - uint32_t pmuseren; - uint32_t pmcntenset; - // Read the user mode perf monitor counter access permissions. - asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren)); - if (pmuseren & 1) { // Allows reading perfmon counters for user mode code. - asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset)); - if (pmcntenset & 0x80000000ul) { // Is it counting? 
- asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr)); - // The counter is set up to count every 64th cycle - return static_cast(pmccntr) * 64; // Should optimize to << 6 - } - } -#endif - struct timeval tv; - gettimeofday(&tv, nullptr); - return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; -#elif defined(__mips__) - // mips apparently only allows rdtsc for superusers, so we fall - // back to gettimeofday. It's possible clock_gettime would be better. - struct timeval tv; - gettimeofday(&tv, nullptr); - return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; -#elif defined(__s390__) // Covers both s390 and s390x. - // Return the CPU clock. - uint64_t tsc; - asm("stck %0" : "=Q" (tsc) : : "cc"); - return tsc; -#else -// The soft failover to a generic implementation is automatic only for ARM. -// For other platforms the developer is expected to make an attempt to create -// a fast implementation and use generic version if nothing better is available. -#error You need to define CycleTimer for your OS and CPU -#endif -} -} // end namespace cycleclock -} // end namespace benchmark - -#endif // BENCHMARK_CYCLECLOCK_H_ diff --git a/benchmarks/thirdparty/benchmark/src/internal_macros.h b/benchmarks/thirdparty/benchmark/src/internal_macros.h deleted file mode 100755 index edb8a5c0a3..0000000000 --- a/benchmarks/thirdparty/benchmark/src/internal_macros.h +++ /dev/null @@ -1,89 +0,0 @@ -#ifndef BENCHMARK_INTERNAL_MACROS_H_ -#define BENCHMARK_INTERNAL_MACROS_H_ - -#include "benchmark/benchmark.h" - -#ifndef __has_feature -#define __has_feature(x) 0 -#endif -#ifndef __has_builtin -#define __has_builtin(x) 0 -#endif - -#if defined(__clang__) - #if !defined(COMPILER_CLANG) - #define COMPILER_CLANG - #endif -#elif defined(_MSC_VER) - #if !defined(COMPILER_MSVC) - #define COMPILER_MSVC - #endif -#elif defined(__GNUC__) - #if !defined(COMPILER_GCC) - #define COMPILER_GCC - #endif -#endif - -#if __has_feature(cxx_attributes) - #define BENCHMARK_NORETURN [[noreturn]] -#elif defined(__GNUC__) - #define BENCHMARK_NORETURN __attribute__((noreturn)) -#elif defined(COMPILER_MSVC) - #define BENCHMARK_NORETURN __declspec(noreturn) -#else - #define BENCHMARK_NORETURN -#endif - -#if defined(__CYGWIN__) - #define BENCHMARK_OS_CYGWIN 1 -#elif defined(_WIN32) - #define BENCHMARK_OS_WINDOWS 1 -#elif defined(__APPLE__) - #define BENCHMARK_OS_APPLE 1 - #include "TargetConditionals.h" - #if defined(TARGET_OS_MAC) - #define BENCHMARK_OS_MACOSX 1 - #if defined(TARGET_OS_IPHONE) - #define BENCHMARK_OS_IOS 1 - #endif - #endif -#elif defined(__FreeBSD__) - #define BENCHMARK_OS_FREEBSD 1 -#elif defined(__NetBSD__) - #define BENCHMARK_OS_NETBSD 1 -#elif defined(__OpenBSD__) - #define BENCHMARK_OS_OPENBSD 1 -#elif defined(__linux__) - #define BENCHMARK_OS_LINUX 1 -#elif defined(__native_client__) - #define BENCHMARK_OS_NACL 1 -#elif defined(__EMSCRIPTEN__) - #define BENCHMARK_OS_EMSCRIPTEN 1 -#elif defined(__rtems__) - #define BENCHMARK_OS_RTEMS 1 -#elif defined(__Fuchsia__) -#define BENCHMARK_OS_FUCHSIA 1 -#elif defined (__SVR4) && defined (__sun) -#define BENCHMARK_OS_SOLARIS 1 -#endif - -#if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \ - && !defined(__EXCEPTIONS) - #define BENCHMARK_HAS_NO_EXCEPTIONS -#endif - -#if defined(COMPILER_CLANG) || defined(COMPILER_GCC) - #define BENCHMARK_MAYBE_UNUSED __attribute__((unused)) -#else - #define BENCHMARK_MAYBE_UNUSED -#endif - -#if defined(COMPILER_GCC) || __has_builtin(__builtin_unreachable) - #define BENCHMARK_UNREACHABLE() __builtin_unreachable() -#elif 
defined(COMPILER_MSVC) - #define BENCHMARK_UNREACHABLE() __assume(false) -#else - #define BENCHMARK_UNREACHABLE() ((void)0) -#endif - -#endif // BENCHMARK_INTERNAL_MACROS_H_ diff --git a/benchmarks/thirdparty/benchmark/src/json_reporter.cc b/benchmarks/thirdparty/benchmark/src/json_reporter.cc deleted file mode 100755 index 685d6b097d..0000000000 --- a/benchmarks/thirdparty/benchmark/src/json_reporter.cc +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "benchmark/benchmark.h" -#include "complexity.h" - -#include -#include -#include -#include -#include -#include -#include // for setprecision -#include - -#include "string_util.h" -#include "timers.h" - -namespace benchmark { - -namespace { - -std::string FormatKV(std::string const& key, std::string const& value) { - return StrFormat("\"%s\": \"%s\"", key.c_str(), value.c_str()); -} - -std::string FormatKV(std::string const& key, const char* value) { - return StrFormat("\"%s\": \"%s\"", key.c_str(), value); -} - -std::string FormatKV(std::string const& key, bool value) { - return StrFormat("\"%s\": %s", key.c_str(), value ? "true" : "false"); -} - -std::string FormatKV(std::string const& key, int64_t value) { - std::stringstream ss; - ss << '"' << key << "\": " << value; - return ss.str(); -} - -std::string FormatKV(std::string const& key, double value) { - std::stringstream ss; - ss << '"' << key << "\": "; - - const auto max_digits10 = std::numeric_limits::max_digits10; - const auto max_fractional_digits10 = max_digits10 - 1; - - ss << std::scientific << std::setprecision(max_fractional_digits10) << value; - return ss.str(); -} - -int64_t RoundDouble(double v) { return static_cast(v + 0.5); } - -} // end namespace - -bool JSONReporter::ReportContext(const Context& context) { - std::ostream& out = GetOutputStream(); - - out << "{\n"; - std::string inner_indent(2, ' '); - - // Open context block and print context information. 
- out << inner_indent << "\"context\": {\n"; - std::string indent(4, ' '); - - std::string walltime_value = LocalDateTimeString(); - out << indent << FormatKV("date", walltime_value) << ",\n"; - - if (Context::executable_name) { - out << indent << FormatKV("executable", Context::executable_name) << ",\n"; - } - - CPUInfo const& info = context.cpu_info; - out << indent << FormatKV("num_cpus", static_cast(info.num_cpus)) - << ",\n"; - out << indent - << FormatKV("mhz_per_cpu", - RoundDouble(info.cycles_per_second / 1000000.0)) - << ",\n"; - out << indent << FormatKV("cpu_scaling_enabled", info.scaling_enabled) - << ",\n"; - - out << indent << "\"caches\": [\n"; - indent = std::string(6, ' '); - std::string cache_indent(8, ' '); - for (size_t i = 0; i < info.caches.size(); ++i) { - auto& CI = info.caches[i]; - out << indent << "{\n"; - out << cache_indent << FormatKV("type", CI.type) << ",\n"; - out << cache_indent << FormatKV("level", static_cast(CI.level)) - << ",\n"; - out << cache_indent - << FormatKV("size", static_cast(CI.size) * 1000u) << ",\n"; - out << cache_indent - << FormatKV("num_sharing", static_cast(CI.num_sharing)) - << "\n"; - out << indent << "}"; - if (i != info.caches.size() - 1) out << ","; - out << "\n"; - } - indent = std::string(4, ' '); - out << indent << "],\n"; - -#if defined(NDEBUG) - const char build_type[] = "release"; -#else - const char build_type[] = "debug"; -#endif - out << indent << FormatKV("library_build_type", build_type) << "\n"; - // Close context block and open the list of benchmarks. - out << inner_indent << "},\n"; - out << inner_indent << "\"benchmarks\": [\n"; - return true; -} - -void JSONReporter::ReportRuns(std::vector const& reports) { - if (reports.empty()) { - return; - } - std::string indent(4, ' '); - std::ostream& out = GetOutputStream(); - if (!first_report_) { - out << ",\n"; - } - first_report_ = false; - - for (auto it = reports.begin(); it != reports.end(); ++it) { - out << indent << "{\n"; - PrintRunData(*it); - out << indent << '}'; - auto it_cp = it; - if (++it_cp != reports.end()) { - out << ",\n"; - } - } -} - -void JSONReporter::Finalize() { - // Close the list of benchmarks and the top level object. 
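(Editorial aside on ReportRuns above: because runs arrive batch by batch and cannot be buffered, the reporter emits the separating ",\n" before every batch except the first, and peeks one element ahead within a batch. The same comma-placement idea in miniature, with invented data:)

#include <iostream>
#include <vector>

int main() {
  const std::vector<int> items = {1, 2, 3};
  std::cout << "[\n";
  bool first = true;
  for (int v : items) {
    if (!first) std::cout << ",\n";  // separator goes before every later item
    first = false;
    std::cout << "  " << v;
  }
  std::cout << "\n]\n";
  return 0;
}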
- GetOutputStream() << "\n ]\n}\n"; -} - -void JSONReporter::PrintRunData(Run const& run) { - std::string indent(6, ' '); - std::ostream& out = GetOutputStream(); - out << indent << FormatKV("name", run.benchmark_name) << ",\n"; - if (run.error_occurred) { - out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n"; - out << indent << FormatKV("error_message", run.error_message) << ",\n"; - } - if (!run.report_big_o && !run.report_rms) { - out << indent << FormatKV("iterations", run.iterations) << ",\n"; - out << indent - << FormatKV("real_time", run.GetAdjustedRealTime()) - << ",\n"; - out << indent - << FormatKV("cpu_time", run.GetAdjustedCPUTime()); - out << ",\n" - << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); - } else if (run.report_big_o) { - out << indent - << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime()) - << ",\n"; - out << indent - << FormatKV("real_coefficient", run.GetAdjustedRealTime()) - << ",\n"; - out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n"; - out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); - } else if (run.report_rms) { - out << indent - << FormatKV("rms", run.GetAdjustedCPUTime()); - } - if (run.bytes_per_second > 0.0) { - out << ",\n" - << indent - << FormatKV("bytes_per_second", run.bytes_per_second); - } - if (run.items_per_second > 0.0) { - out << ",\n" - << indent - << FormatKV("items_per_second", run.items_per_second); - } - for(auto &c : run.counters) { - out << ",\n" - << indent - << FormatKV(c.first, c.second); - } - if (!run.report_label.empty()) { - out << ",\n" << indent << FormatKV("label", run.report_label); - } - out << '\n'; -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/log.h b/benchmarks/thirdparty/benchmark/src/log.h deleted file mode 100755 index d06e1031db..0000000000 --- a/benchmarks/thirdparty/benchmark/src/log.h +++ /dev/null @@ -1,73 +0,0 @@ -#ifndef BENCHMARK_LOG_H_ -#define BENCHMARK_LOG_H_ - -#include -#include - -#include "benchmark/benchmark.h" - -namespace benchmark { -namespace internal { - -typedef std::basic_ostream&(EndLType)(std::basic_ostream&); - -class LogType { - friend LogType& GetNullLogInstance(); - friend LogType& GetErrorLogInstance(); - - // FIXME: Add locking to output. 
- template - friend LogType& operator<<(LogType&, Tp const&); - friend LogType& operator<<(LogType&, EndLType*); - - private: - LogType(std::ostream* out) : out_(out) {} - std::ostream* out_; - BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType); -}; - -template -LogType& operator<<(LogType& log, Tp const& value) { - if (log.out_) { - *log.out_ << value; - } - return log; -} - -inline LogType& operator<<(LogType& log, EndLType* m) { - if (log.out_) { - *log.out_ << m; - } - return log; -} - -inline int& LogLevel() { - static int log_level = 0; - return log_level; -} - -inline LogType& GetNullLogInstance() { - static LogType log(nullptr); - return log; -} - -inline LogType& GetErrorLogInstance() { - static LogType log(&std::clog); - return log; -} - -inline LogType& GetLogInstanceForLevel(int level) { - if (level <= LogLevel()) { - return GetErrorLogInstance(); - } - return GetNullLogInstance(); -} - -} // end namespace internal -} // end namespace benchmark - -#define VLOG(x) \ - (::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \ - " ") - -#endif diff --git a/benchmarks/thirdparty/benchmark/src/mutex.h b/benchmarks/thirdparty/benchmark/src/mutex.h deleted file mode 100755 index 5f461d05a0..0000000000 --- a/benchmarks/thirdparty/benchmark/src/mutex.h +++ /dev/null @@ -1,155 +0,0 @@ -#ifndef BENCHMARK_MUTEX_H_ -#define BENCHMARK_MUTEX_H_ - -#include -#include - -#include "check.h" - -// Enable thread safety attributes only with clang. -// The attributes can be safely erased when compiling with other compilers. -#if defined(HAVE_THREAD_SAFETY_ATTRIBUTES) -#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) -#else -#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op -#endif - -#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x)) - -#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) - -#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) - -#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) - -#define ACQUIRED_BEFORE(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) - -#define ACQUIRED_AFTER(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) - -#define REQUIRES(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__)) - -#define REQUIRES_SHARED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__)) - -#define ACQUIRE(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__)) - -#define ACQUIRE_SHARED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__)) - -#define RELEASE(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__)) - -#define RELEASE_SHARED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__)) - -#define TRY_ACQUIRE(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__)) - -#define TRY_ACQUIRE_SHARED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__)) - -#define EXCLUDES(...) 
THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) - -#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x)) - -#define ASSERT_SHARED_CAPABILITY(x) \ - THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x)) - -#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) - -#define NO_THREAD_SAFETY_ANALYSIS \ - THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) - -namespace benchmark { - -typedef std::condition_variable Condition; - -// NOTE: Wrappers for std::mutex and std::unique_lock are provided so that -// we can annotate them with thread safety attributes and use the -// -Wthread-safety warning with clang. The standard library types cannot be -// used directly because they do not provided the required annotations. -class CAPABILITY("mutex") Mutex { - public: - Mutex() {} - - void lock() ACQUIRE() { mut_.lock(); } - void unlock() RELEASE() { mut_.unlock(); } - std::mutex& native_handle() { return mut_; } - - private: - std::mutex mut_; -}; - -class SCOPED_CAPABILITY MutexLock { - typedef std::unique_lock MutexLockImp; - - public: - MutexLock(Mutex& m) ACQUIRE(m) : ml_(m.native_handle()) {} - ~MutexLock() RELEASE() {} - MutexLockImp& native_handle() { return ml_; } - - private: - MutexLockImp ml_; -}; - -class Barrier { - public: - Barrier(int num_threads) : running_threads_(num_threads) {} - - // Called by each thread - bool wait() EXCLUDES(lock_) { - bool last_thread = false; - { - MutexLock ml(lock_); - last_thread = createBarrier(ml); - } - if (last_thread) phase_condition_.notify_all(); - return last_thread; - } - - void removeThread() EXCLUDES(lock_) { - MutexLock ml(lock_); - --running_threads_; - if (entered_ != 0) phase_condition_.notify_all(); - } - - private: - Mutex lock_; - Condition phase_condition_; - int running_threads_; - - // State for barrier management - int phase_number_ = 0; - int entered_ = 0; // Number of threads that have entered this barrier - - // Enter the barrier and wait until all other threads have also - // entered the barrier. Returns iff this is the last thread to - // enter the barrier. - bool createBarrier(MutexLock& ml) REQUIRES(lock_) { - CHECK_LT(entered_, running_threads_); - entered_++; - if (entered_ < running_threads_) { - // Wait for all threads to enter - int phase_number_cp = phase_number_; - auto cb = [this, phase_number_cp]() { - return this->phase_number_ > phase_number_cp || - entered_ == running_threads_; // A thread has aborted in error - }; - phase_condition_.wait(ml.native_handle(), cb); - if (phase_number_ > phase_number_cp) return false; - // else (running_threads_ == entered_) and we are the last thread. - } - // Last thread has reached the barrier - phase_number_++; - entered_ = 0; - return true; - } -}; - -} // end namespace benchmark - -#endif // BENCHMARK_MUTEX_H_ diff --git a/benchmarks/thirdparty/benchmark/src/re.h b/benchmarks/thirdparty/benchmark/src/re.h deleted file mode 100755 index 924d2f0ba7..0000000000 --- a/benchmarks/thirdparty/benchmark/src/re.h +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef BENCHMARK_RE_H_ -#define BENCHMARK_RE_H_ - -#include "internal_macros.h" - -#if !defined(HAVE_STD_REGEX) && \ - !defined(HAVE_GNU_POSIX_REGEX) && \ - !defined(HAVE_POSIX_REGEX) - // No explicit regex selection; detect based on builtin hints. - #if defined(BENCHMARK_OS_LINUX) || defined(BENCHMARK_OS_APPLE) - #define HAVE_POSIX_REGEX 1 - #elif __cplusplus >= 199711L - #define HAVE_STD_REGEX 1 - #endif -#endif - -// Prefer C regex libraries when compiling w/o exceptions so that we can -// correctly report errors. -#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && \ - defined(BENCHMARK_HAVE_STD_REGEX) && \ - (defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX)) - #undef HAVE_STD_REGEX -#endif - -#if defined(HAVE_STD_REGEX) - #include -#elif defined(HAVE_GNU_POSIX_REGEX) - #include -#elif defined(HAVE_POSIX_REGEX) - #include -#else -#error No regular expression backend was found! -#endif -#include - -#include "check.h" - -namespace benchmark { - -// A wrapper around the POSIX regular expression API that provides automatic -// cleanup -class Regex { - public: - Regex() : init_(false) {} - - ~Regex(); - - // Compile a regular expression matcher from spec. Returns true on success. - // - // On failure (and if error is not nullptr), error is populated with a human - // readable error message if an error occurs. - bool Init(const std::string& spec, std::string* error); - - // Returns whether str matches the compiled regular expression. - bool Match(const std::string& str); - - private: - bool init_; -// Underlying regular expression object -#if defined(HAVE_STD_REGEX) - std::regex re_; -#elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX) - regex_t re_; -#else - #error No regular expression backend implementation available -#endif -}; - -#if defined(HAVE_STD_REGEX) - -inline bool Regex::Init(const std::string& spec, std::string* error) { -#ifdef BENCHMARK_HAS_NO_EXCEPTIONS - ((void)error); // suppress unused warning -#else - try { -#endif - re_ = std::regex(spec, std::regex_constants::extended); - init_ = true; -#ifndef BENCHMARK_HAS_NO_EXCEPTIONS - } catch (const std::regex_error& e) { - if (error) { - *error = e.what(); - } - } -#endif - return init_; -} - -inline Regex::~Regex() {} - -inline bool Regex::Match(const std::string& str) { - if (!init_) { - return false; - } - return std::regex_search(str, re_); -} - -#else -inline bool Regex::Init(const std::string& spec, std::string* error) { - int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB); - if (ec != 0) { - if (error) { - size_t needed = regerror(ec, &re_, nullptr, 0); - char* errbuf = new char[needed]; - regerror(ec, &re_, errbuf, needed); - - // regerror returns the number of bytes necessary to null terminate - // the string, so we move that when assigning to error. 
- CHECK_NE(needed, 0); - error->assign(errbuf, needed - 1); - - delete[] errbuf; - } - - return false; - } - - init_ = true; - return true; -} - -inline Regex::~Regex() { - if (init_) { - regfree(&re_); - } -} - -inline bool Regex::Match(const std::string& str) { - if (!init_) { - return false; - } - return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0; -} -#endif - -} // end namespace benchmark - -#endif // BENCHMARK_RE_H_ diff --git a/benchmarks/thirdparty/benchmark/src/reporter.cc b/benchmarks/thirdparty/benchmark/src/reporter.cc deleted file mode 100755 index 4b40aaec8b..0000000000 --- a/benchmarks/thirdparty/benchmark/src/reporter.cc +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "benchmark/benchmark.h" -#include "timers.h" - -#include - -#include -#include -#include - -#include "check.h" - -namespace benchmark { - -BenchmarkReporter::BenchmarkReporter() - : output_stream_(&std::cout), error_stream_(&std::cerr) {} - -BenchmarkReporter::~BenchmarkReporter() {} - -void BenchmarkReporter::PrintBasicContext(std::ostream *out, - Context const &context) { - CHECK(out) << "cannot be null"; - auto &Out = *out; - - Out << LocalDateTimeString() << "\n"; - - if (context.executable_name) - Out << "Running " << context.executable_name << "\n"; - - const CPUInfo &info = context.cpu_info; - Out << "Run on (" << info.num_cpus << " X " - << (info.cycles_per_second / 1000000.0) << " MHz CPU " - << ((info.num_cpus > 1) ? "s" : "") << ")\n"; - if (info.caches.size() != 0) { - Out << "CPU Caches:\n"; - for (auto &CInfo : info.caches) { - Out << " L" << CInfo.level << " " << CInfo.type << " " - << (CInfo.size / 1000) << "K"; - if (CInfo.num_sharing != 0) - Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")"; - Out << "\n"; - } - } - - if (info.scaling_enabled) { - Out << "***WARNING*** CPU scaling is enabled, the benchmark " - "real time measurements may be noisy and will incur extra " - "overhead.\n"; - } - -#ifndef NDEBUG - Out << "***WARNING*** Library was built as DEBUG. Timings may be " - "affected.\n"; -#endif -} - -// No initializer because it's already initialized to NULL. 
-const char* BenchmarkReporter::Context::executable_name; - -BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()) {} - -double BenchmarkReporter::Run::GetAdjustedRealTime() const { - double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit); - if (iterations != 0) new_time /= static_cast(iterations); - return new_time; -} - -double BenchmarkReporter::Run::GetAdjustedCPUTime() const { - double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit); - if (iterations != 0) new_time /= static_cast(iterations); - return new_time; -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/sleep.cc b/benchmarks/thirdparty/benchmark/src/sleep.cc deleted file mode 100755 index 54aa04a422..0000000000 --- a/benchmarks/thirdparty/benchmark/src/sleep.cc +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "sleep.h" - -#include -#include -#include - -#include "internal_macros.h" - -#ifdef BENCHMARK_OS_WINDOWS -#include -#endif - -namespace benchmark { -#ifdef BENCHMARK_OS_WINDOWS -// Window's Sleep takes milliseconds argument. -void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); } -void SleepForSeconds(double seconds) { - SleepForMilliseconds(static_cast(kNumMillisPerSecond * seconds)); -} -#else // BENCHMARK_OS_WINDOWS -void SleepForMicroseconds(int microseconds) { - struct timespec sleep_time; - sleep_time.tv_sec = microseconds / kNumMicrosPerSecond; - sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro; - while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) - ; // Ignore signals and wait for the full interval to elapse. 
-} - -void SleepForMilliseconds(int milliseconds) { - SleepForMicroseconds(milliseconds * kNumMicrosPerMilli); -} - -void SleepForSeconds(double seconds) { - SleepForMicroseconds(static_cast(seconds * kNumMicrosPerSecond)); -} -#endif // BENCHMARK_OS_WINDOWS -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/sleep.h b/benchmarks/thirdparty/benchmark/src/sleep.h deleted file mode 100755 index f98551afe2..0000000000 --- a/benchmarks/thirdparty/benchmark/src/sleep.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef BENCHMARK_SLEEP_H_ -#define BENCHMARK_SLEEP_H_ - -namespace benchmark { -const int kNumMillisPerSecond = 1000; -const int kNumMicrosPerMilli = 1000; -const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000; -const int kNumNanosPerMicro = 1000; -const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond; - -void SleepForMilliseconds(int milliseconds); -void SleepForSeconds(double seconds); -} // end namespace benchmark - -#endif // BENCHMARK_SLEEP_H_ diff --git a/benchmarks/thirdparty/benchmark/src/statistics.cc b/benchmarks/thirdparty/benchmark/src/statistics.cc deleted file mode 100755 index 1c91e1015a..0000000000 --- a/benchmarks/thirdparty/benchmark/src/statistics.cc +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2016 Ismael Jimenez Martinez. All rights reserved. -// Copyright 2017 Roman Lebedev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "benchmark/benchmark.h" - -#include -#include -#include -#include -#include -#include "check.h" -#include "statistics.h" - -namespace benchmark { - -auto StatisticsSum = [](const std::vector& v) { - return std::accumulate(v.begin(), v.end(), 0.0); -}; - -double StatisticsMean(const std::vector& v) { - if (v.empty()) return 0.0; - return StatisticsSum(v) * (1.0 / v.size()); -} - -double StatisticsMedian(const std::vector& v) { - if (v.size() < 3) return StatisticsMean(v); - std::vector copy(v); - - auto center = copy.begin() + v.size() / 2; - std::nth_element(copy.begin(), center, copy.end()); - - // did we have an odd number of samples? 
- // if yes, then center is the median
- // if no, then we are looking for the average between center and the value before
- if(v.size() % 2 == 1)
- return *center;
- auto center2 = copy.begin() + v.size() / 2 - 1;
- std::nth_element(copy.begin(), center2, copy.end());
- return (*center + *center2) / 2.0;
-}
-
-// Return the sum of the squares of this sample set
-auto SumSquares = [](const std::vector<double>& v) {
- return std::inner_product(v.begin(), v.end(), v.begin(), 0.0);
-};
-
-auto Sqr = [](const double dat) { return dat * dat; };
-auto Sqrt = [](const double dat) {
- // Avoid NaN due to imprecision in the calculations
- if (dat < 0.0) return 0.0;
- return std::sqrt(dat);
-};
-
-double StatisticsStdDev(const std::vector<double>& v) {
- const auto mean = StatisticsMean(v);
- if (v.empty()) return mean;
-
- // Sample standard deviation is undefined for n = 1
- if (v.size() == 1)
- return 0.0;
-
- const double avg_squares = SumSquares(v) * (1.0 / v.size());
- return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
-}
-
-std::vector<BenchmarkReporter::Run> ComputeStats(
- const std::vector<BenchmarkReporter::Run>& reports) {
- typedef BenchmarkReporter::Run Run;
- std::vector<Run> results;
-
- auto error_count =
- std::count_if(reports.begin(), reports.end(),
- [](Run const& run) { return run.error_occurred; });
-
- if (reports.size() - error_count < 2) {
- // We don't report aggregated data if there was a single run.
- return results;
- }
-
- // Accumulators.
- std::vector<double> real_accumulated_time_stat;
- std::vector<double> cpu_accumulated_time_stat;
- std::vector<double> bytes_per_second_stat;
- std::vector<double> items_per_second_stat;
-
- real_accumulated_time_stat.reserve(reports.size());
- cpu_accumulated_time_stat.reserve(reports.size());
- bytes_per_second_stat.reserve(reports.size());
- items_per_second_stat.reserve(reports.size());
-
- // All repetitions should be run with the same number of iterations so we
- // can take this information from the first benchmark.
- int64_t const run_iterations = reports.front().iterations;
- // create stats for user counters
- struct CounterStat {
- Counter c;
- std::vector<double> s;
- };
- std::map< std::string, CounterStat > counter_stats;
- for(Run const& r : reports) {
- for(auto const& cnt : r.counters) {
- auto it = counter_stats.find(cnt.first);
- if(it == counter_stats.end()) {
- counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
- it = counter_stats.find(cnt.first);
- it->second.s.reserve(reports.size());
- } else {
- CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
- }
- }
- }
-
- // Populate the accumulators.
- for (Run const& run : reports) {
- CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
- CHECK_EQ(run_iterations, run.iterations);
- if (run.error_occurred) continue;
- real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
- cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
- items_per_second_stat.emplace_back(run.items_per_second);
- bytes_per_second_stat.emplace_back(run.bytes_per_second);
- // user counters
- for(auto const& cnt : run.counters) {
- auto it = counter_stats.find(cnt.first);
- CHECK_NE(it, counter_stats.end());
- it->second.s.emplace_back(cnt.second);
- }
- }
-
- // Only add label if it is the same for all runs
- std::string report_label = reports[0].report_label;
- for (std::size_t i = 1; i < reports.size(); i++) {
- if (reports[i].report_label != report_label) {
- report_label = "";
- break;
- }
- }
-
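(Editorial aside, not part of the patch: StatisticsStdDev above computes the sample standard deviation from the one-pass identity s^2 = n/(n-1) * (E[x^2] - mean^2) instead of a second pass over the deviations. A quick self-contained check of that algebra, with invented values:)

#include <cassert>
#include <cmath>
#include <vector>

int main() {
  const std::vector<double> v = {1.0, 2.0, 3.0, 4.0};
  const double n = static_cast<double>(v.size());
  double sum = 0.0, sum_sq = 0.0;
  for (double x : v) { sum += x; sum_sq += x * x; }
  const double mean = sum / n;            // 2.5
  const double avg_squares = sum_sq / n;  // 7.5, i.e. E[x^2]
  const double var = n / (n - 1.0) * (avg_squares - mean * mean);
  // Matches the two-pass sample variance of {1,2,3,4}, which is 5/3.
  assert(std::fabs(var - 5.0 / 3.0) < 1e-12);
  return 0;
}

- for(const auto& Stat : *reports[0].statistics) {
- // Get the data from the accumulator to BenchmarkReporter::Run's.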
- Run data;
- data.benchmark_name = reports[0].benchmark_name + "_" + Stat.name_;
- data.report_label = report_label;
- data.iterations = run_iterations;
-
- data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
- data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
- data.bytes_per_second = Stat.compute_(bytes_per_second_stat);
- data.items_per_second = Stat.compute_(items_per_second_stat);
-
- data.time_unit = reports[0].time_unit;
-
- // user counters
- for(auto const& kv : counter_stats) {
- const auto uc_stat = Stat.compute_(kv.second.s);
- auto c = Counter(uc_stat, counter_stats[kv.first].c.flags);
- data.counters[kv.first] = c;
- }
-
- results.push_back(data);
- }
-
- return results;
-}
-
-} // end namespace benchmark
diff --git a/benchmarks/thirdparty/benchmark/src/statistics.h b/benchmarks/thirdparty/benchmark/src/statistics.h
deleted file mode 100755
index 7eccc85536..0000000000
--- a/benchmarks/thirdparty/benchmark/src/statistics.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
-// Copyright 2017 Roman Lebedev. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef STATISTICS_H_
-#define STATISTICS_H_
-
-#include <vector>
-
-#include "benchmark/benchmark.h"
-
-namespace benchmark {
-
-// Return a vector containing the mean, median and standard deviation
-// information (and any user-specified info) for the specified list of
-// reports. If 'reports' contains fewer than two non-errored runs, an empty
-// vector is returned.
-std::vector<BenchmarkReporter::Run> ComputeStats(
- const std::vector<BenchmarkReporter::Run>& reports);
-
-double StatisticsMean(const std::vector<double>& v);
-double StatisticsMedian(const std::vector<double>& v);
-double StatisticsStdDev(const std::vector<double>& v);
-
-} // end namespace benchmark
-
-#endif // STATISTICS_H_
diff --git a/benchmarks/thirdparty/benchmark/src/string_util.cc b/benchmarks/thirdparty/benchmark/src/string_util.cc
deleted file mode 100755
index ebc3acebd2..0000000000
--- a/benchmarks/thirdparty/benchmark/src/string_util.cc
+++ /dev/null
@@ -1,172 +0,0 @@
-#include "string_util.h"
-
-#include <array>
-#include <cmath>
-#include <cstdarg>
-#include <cstdio>
-#include <memory>
-#include <sstream>
-
-#include "arraysize.h"
-
-namespace benchmark {
-namespace {
-
-// kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta.
-const char kBigSIUnits[] = "kMGTPEZY";
-// Kibi, Mebi, Gibi, Tebi, Pebi, Exbi, Zebi, Yobi.
-const char kBigIECUnits[] = "KMGTPEZY";
-// milli, micro, nano, pico, femto, atto, zepto, yocto.
-const char kSmallSIUnits[] = "munpfazy";
-
-// We require that all three arrays have the same size.
-static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits), - "SI and IEC unit arrays must be the same size"); -static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits), - "Small SI and Big SI unit arrays must be the same size"); - -static const int64_t kUnitsSize = arraysize(kBigSIUnits); - -void ToExponentAndMantissa(double val, double thresh, int precision, - double one_k, std::string* mantissa, - int64_t* exponent) { - std::stringstream mantissa_stream; - - if (val < 0) { - mantissa_stream << "-"; - val = -val; - } - - // Adjust threshold so that it never excludes things which can't be rendered - // in 'precision' digits. - const double adjusted_threshold = - std::max(thresh, 1.0 / std::pow(10.0, precision)); - const double big_threshold = adjusted_threshold * one_k; - const double small_threshold = adjusted_threshold; - // Values in ]simple_threshold,small_threshold[ will be printed as-is - const double simple_threshold = 0.01; - - if (val > big_threshold) { - // Positive powers - double scaled = val; - for (size_t i = 0; i < arraysize(kBigSIUnits); ++i) { - scaled /= one_k; - if (scaled <= big_threshold) { - mantissa_stream << scaled; - *exponent = i + 1; - *mantissa = mantissa_stream.str(); - return; - } - } - mantissa_stream << val; - *exponent = 0; - } else if (val < small_threshold) { - // Negative powers - if (val < simple_threshold) { - double scaled = val; - for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i) { - scaled *= one_k; - if (scaled >= small_threshold) { - mantissa_stream << scaled; - *exponent = -static_cast(i + 1); - *mantissa = mantissa_stream.str(); - return; - } - } - } - mantissa_stream << val; - *exponent = 0; - } else { - mantissa_stream << val; - *exponent = 0; - } - *mantissa = mantissa_stream.str(); -} - -std::string ExponentToPrefix(int64_t exponent, bool iec) { - if (exponent == 0) return ""; - - const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1); - if (index >= kUnitsSize) return ""; - - const char* array = - (exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits); - if (iec) - return array[index] + std::string("i"); - else - return std::string(1, array[index]); -} - -std::string ToBinaryStringFullySpecified(double value, double threshold, - int precision, double one_k = 1024.0) { - std::string mantissa; - int64_t exponent; - ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa, - &exponent); - return mantissa + ExponentToPrefix(exponent, false); -} - -} // end namespace - -void AppendHumanReadable(int n, std::string* str) { - std::stringstream ss; - // Round down to the nearest SI prefix. - ss << ToBinaryStringFullySpecified(n, 1.0, 0); - *str += ss.str(); -} - -std::string HumanReadableNumber(double n, double one_k) { - // 1.1 means that figures up to 1.1k should be shown with the next unit down; - // this softens edge effects. - // 1 means that we should show one decimal place of precision. 
- return ToBinaryStringFullySpecified(n, 1.1, 1, one_k); -} - -std::string StrFormatImp(const char* msg, va_list args) { - // we might need a second shot at this, so pre-emptivly make a copy - va_list args_cp; - va_copy(args_cp, args); - - // TODO(ericwf): use std::array for first attempt to avoid one memory - // allocation guess what the size might be - std::array local_buff; - std::size_t size = local_buff.size(); - // 2015-10-08: vsnprintf is used instead of snd::vsnprintf due to a limitation - // in the android-ndk - auto ret = vsnprintf(local_buff.data(), size, msg, args_cp); - - va_end(args_cp); - - // handle empty expansion - if (ret == 0) return std::string{}; - if (static_cast(ret) < size) - return std::string(local_buff.data()); - - // we did not provide a long enough buffer on our first attempt. - // add 1 to size to account for null-byte in size cast to prevent overflow - size = static_cast(ret) + 1; - auto buff_ptr = std::unique_ptr(new char[size]); - // 2015-10-08: vsnprintf is used instead of snd::vsnprintf due to a limitation - // in the android-ndk - ret = vsnprintf(buff_ptr.get(), size, msg, args); - return std::string(buff_ptr.get()); -} - -std::string StrFormat(const char* format, ...) { - va_list args; - va_start(args, format); - std::string tmp = StrFormatImp(format, args); - va_end(args); - return tmp; -} - -void ReplaceAll(std::string* str, const std::string& from, - const std::string& to) { - std::size_t start = 0; - while ((start = str->find(from, start)) != std::string::npos) { - str->replace(start, from.length(), to); - start += to.length(); - } -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/string_util.h b/benchmarks/thirdparty/benchmark/src/string_util.h deleted file mode 100755 index e70e769872..0000000000 --- a/benchmarks/thirdparty/benchmark/src/string_util.h +++ /dev/null @@ -1,40 +0,0 @@ -#ifndef BENCHMARK_STRING_UTIL_H_ -#define BENCHMARK_STRING_UTIL_H_ - -#include -#include -#include -#include "internal_macros.h" - -namespace benchmark { - -void AppendHumanReadable(int n, std::string* str); - -std::string HumanReadableNumber(double n, double one_k = 1024.0); - -std::string StrFormat(const char* format, ...); - -inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT { - return out; -} - -template -inline std::ostream& StrCatImp(std::ostream& out, First&& f, - Rest&&... rest) { - out << std::forward(f); - return StrCatImp(out, std::forward(rest)...); -} - -template -inline std::string StrCat(Args&&... args) { - std::ostringstream ss; - StrCatImp(ss, std::forward(args)...); - return ss.str(); -} - -void ReplaceAll(std::string* str, const std::string& from, - const std::string& to); - -} // end namespace benchmark - -#endif // BENCHMARK_STRING_UTIL_H_ diff --git a/benchmarks/thirdparty/benchmark/src/sysinfo.cc b/benchmarks/thirdparty/benchmark/src/sysinfo.cc deleted file mode 100755 index d19d0ef4c1..0000000000 --- a/benchmarks/thirdparty/benchmark/src/sysinfo.cc +++ /dev/null @@ -1,587 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
diff --git a/benchmarks/thirdparty/benchmark/src/sysinfo.cc b/benchmarks/thirdparty/benchmark/src/sysinfo.cc
deleted file mode 100755
index d19d0ef4c1..0000000000
--- a/benchmarks/thirdparty/benchmark/src/sysinfo.cc
+++ /dev/null
@@ -1,587 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "internal_macros.h"
-
-#ifdef BENCHMARK_OS_WINDOWS
-#include <Shlwapi.h>
-#undef StrCat  // Don't let StrCat in string_util.h be renamed to lstrcatA
-#include <VersionHelpers.h>
-#include <Windows.h>
-#else
-#include <fcntl.h>
-#ifndef BENCHMARK_OS_FUCHSIA
-#include <sys/resource.h>
-#endif
-#include <sys/time.h>
-#include <sys/types.h>  // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
-#include <unistd.h>
-#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \
-    defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD
-#define BENCHMARK_HAS_SYSCTL
-#include <sys/sysctl.h>
-#endif
-#endif
-#if defined(BENCHMARK_OS_SOLARIS)
-#include <kstat.h>
-#endif
-
-#include <algorithm>
-#include <array>
-#include <bitset>
-#include <cerrno>
-#include <climits>
-#include <cstdint>
-#include <cstdio>
-#include <cstdlib>
-#include <cstring>
-#include <fstream>
-#include <iostream>
-#include <iterator>
-#include <limits>
-#include <memory>
-#include <sstream>
-
-#include "check.h"
-#include "cycleclock.h"
-#include "internal_macros.h"
-#include "log.h"
-#include "sleep.h"
-#include "string_util.h"
-
-namespace benchmark {
-namespace {
-
-void PrintImp(std::ostream& out) { out << std::endl; }
-
-template <class First, class... Rest>
-void PrintImp(std::ostream& out, First&& f, Rest&&... rest) {
-  out << std::forward<First>(f);
-  PrintImp(out, std::forward<Rest>(rest)...);
-}
-
-template <class... Args>
-BENCHMARK_NORETURN void PrintErrorAndDie(Args&&... args) {
-  PrintImp(std::cerr, std::forward<Args>(args)...);
-  std::exit(EXIT_FAILURE);
-}
-
-#ifdef BENCHMARK_HAS_SYSCTL
-
-/// ValueUnion - A type used to correctly alias the byte-for-byte output of
-/// `sysctl` with the result type it's to be interpreted as.
-struct ValueUnion {
-  union DataT {
-    uint32_t uint32_value;
-    uint64_t uint64_value;
-    // For correct aliasing of union members from bytes.
-    char bytes[8];
-  };
-  using DataPtr = std::unique_ptr<DataT, decltype(&std::free)>;
-
-  // The size of the data union member + its trailing array size.
-  size_t Size;
-  DataPtr Buff;
-
- public:
-  ValueUnion() : Size(0), Buff(nullptr, &std::free) {}
-
-  explicit ValueUnion(size_t BuffSize)
-      : Size(sizeof(DataT) + BuffSize),
-        Buff(::new (std::malloc(Size)) DataT(), &std::free) {}
-
-  ValueUnion(ValueUnion&& other) = default;
-
-  explicit operator bool() const { return bool(Buff); }
-
-  char* data() const { return Buff->bytes; }
-
-  std::string GetAsString() const { return std::string(data()); }
-
-  int64_t GetAsInteger() const {
-    if (Size == sizeof(Buff->uint32_value))
-      return static_cast<int32_t>(Buff->uint32_value);
-    else if (Size == sizeof(Buff->uint64_value))
-      return static_cast<int64_t>(Buff->uint64_value);
-    BENCHMARK_UNREACHABLE();
-  }
-
-  uint64_t GetAsUnsigned() const {
-    if (Size == sizeof(Buff->uint32_value))
-      return Buff->uint32_value;
-    else if (Size == sizeof(Buff->uint64_value))
-      return Buff->uint64_value;
-    BENCHMARK_UNREACHABLE();
-  }
-
-  template <class T, int N>
-  std::array<T, N> GetAsArray() {
-    const int ArrSize = sizeof(T) * N;
-    CHECK_LE(ArrSize, Size);
-    std::array<T, N> Arr;
-    std::memcpy(Arr.data(), data(), ArrSize);
-    return Arr;
-  }
-};
-
-ValueUnion GetSysctlImp(std::string const& Name) {
-#if defined BENCHMARK_OS_OPENBSD
-  int mib[2];
-
-  mib[0] = CTL_HW;
-  if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed")){
-    ValueUnion buff(sizeof(int));
-
-    if (Name == "hw.ncpu") {
-      mib[1] = HW_NCPU;
-    } else {
-      mib[1] = HW_CPUSPEED;
-    }
-
-    if (sysctl(mib, 2, buff.data(), &buff.Size, nullptr, 0) == -1) {
-      return ValueUnion();
-    }
-    return buff;
-  }
-  return ValueUnion();
-#else
-  size_t CurBuffSize = 0;
-  if (sysctlbyname(Name.c_str(), nullptr, &CurBuffSize, nullptr, 0) == -1)
-    return ValueUnion();
-
-  ValueUnion buff(CurBuffSize);
-  if (sysctlbyname(Name.c_str(), buff.data(), &buff.Size, nullptr, 0) == 0)
-    return buff;
-  return ValueUnion();
-#endif
-}
-
-BENCHMARK_MAYBE_UNUSED
-bool GetSysctl(std::string const& Name, std::string* Out) {
-  Out->clear();
-  auto Buff = GetSysctlImp(Name);
-  if (!Buff) return false;
-  Out->assign(Buff.data());
-  return true;
-}
-
-template <class Tp,
-          class = typename std::enable_if<std::is_integral<Tp>::value>::type>
-bool GetSysctl(std::string const& Name, Tp* Out) {
-  *Out = 0;
-  auto Buff = GetSysctlImp(Name);
-  if (!Buff) return false;
-  *Out = static_cast<Tp>(Buff.GetAsUnsigned());
-  return true;
-}
-
-template <class Tp, size_t N>
-bool GetSysctl(std::string const& Name, std::array<Tp, N>* Out) {
-  auto Buff = GetSysctlImp(Name);
-  if (!Buff) return false;
-  *Out = Buff.GetAsArray<Tp, N>();
-  return true;
-}
-#endif
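GetSysctlImp above queries sysctl twice: once with a null buffer so the kernel reports the payload size, then again to fetch the value into a correctly sized buffer. A hedged, BSD/macOS-only sketch of that two-step protocol follows; machdep.cpu.brand_string is an illustrative macOS key, not something the deleted code reads.

#include <sys/types.h>
#include <sys/sysctl.h>

#include <cstdio>
#include <vector>

int main() {
  // First call: null buffer, so the kernel writes the required size.
  size_t size = 0;
  if (sysctlbyname("machdep.cpu.brand_string", nullptr, &size, nullptr, 0) != 0)
    return 1;
  // Second call: fetch the value into a buffer of exactly that size.
  std::vector<char> buf(size);
  if (sysctlbyname("machdep.cpu.brand_string", buf.data(), &size, nullptr, 0) != 0)
    return 1;
  std::printf("%s\n", buf.data());
  return 0;
}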
-
-template <class ArgT>
-bool ReadFromFile(std::string const& fname, ArgT* arg) {
-  *arg = ArgT();
-  std::ifstream f(fname.c_str());
-  if (!f.is_open()) return false;
-  f >> *arg;
-  return f.good();
-}
-
-bool CpuScalingEnabled(int num_cpus) {
-  // We don't have a valid CPU count, so don't even bother.
-  if (num_cpus <= 0) return false;
-#ifndef BENCHMARK_OS_WINDOWS
-  // On Linux, the CPUfreq subsystem exposes CPU information as files on the
-  // local file system. If reading the exported files fails, then we may not be
-  // running on Linux, so we silently ignore all the read errors.
-  std::string res;
-  for (int cpu = 0; cpu < num_cpus; ++cpu) {
-    std::string governor_file =
-        StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
-    if (ReadFromFile(governor_file, &res) && res != "performance") return true;
-  }
-#endif
-  return false;
-}
-
-int CountSetBitsInCPUMap(std::string Val) {
-  auto CountBits = [](std::string Part) {
-    using CPUMask = std::bitset<sizeof(std::uintptr_t) * CHAR_BIT>;
-    Part = "0x" + Part;
-    CPUMask Mask(std::stoul(Part, nullptr, 16));
-    return static_cast<int>(Mask.count());
-  };
-  size_t Pos;
-  int total = 0;
-  while ((Pos = Val.find(',')) != std::string::npos) {
-    total += CountBits(Val.substr(0, Pos));
-    Val = Val.substr(Pos + 1);
-  }
-  if (!Val.empty()) {
-    total += CountBits(Val);
-  }
-  return total;
-}
-
-BENCHMARK_MAYBE_UNUSED
-std::vector<CPUInfo::CacheInfo> GetCacheSizesFromKVFS() {
-  std::vector<CPUInfo::CacheInfo> res;
-  std::string dir = "/sys/devices/system/cpu/cpu0/cache/";
-  int Idx = 0;
-  while (true) {
-    CPUInfo::CacheInfo info;
-    std::string FPath = StrCat(dir, "index", Idx++, "/");
-    std::ifstream f(StrCat(FPath, "size").c_str());
-    if (!f.is_open()) break;
-    std::string suffix;
-    f >> info.size;
-    if (f.fail())
-      PrintErrorAndDie("Failed while reading file '", FPath, "size'");
-    if (f.good()) {
-      f >> suffix;
-      if (f.bad())
-        PrintErrorAndDie(
-            "Invalid cache size format: failed to read size suffix");
-      else if (f && suffix != "K")
-        PrintErrorAndDie("Invalid cache size format: Expected bytes ", suffix);
-      else if (suffix == "K")
-        info.size *= 1000;
-    }
-    if (!ReadFromFile(StrCat(FPath, "type"), &info.type))
-      PrintErrorAndDie("Failed to read from file ", FPath, "type");
-    if (!ReadFromFile(StrCat(FPath, "level"), &info.level))
-      PrintErrorAndDie("Failed to read from file ", FPath, "level");
-    std::string map_str;
-    if (!ReadFromFile(StrCat(FPath, "shared_cpu_map"), &map_str))
-      PrintErrorAndDie("Failed to read from file ", FPath, "shared_cpu_map");
-    info.num_sharing = CountSetBitsInCPUMap(map_str);
-    res.push_back(info);
-  }
-
-  return res;
-}
-
-#ifdef BENCHMARK_OS_MACOSX
-std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX() {
-  std::vector<CPUInfo::CacheInfo> res;
-  std::array<uint64_t, 4> CacheCounts{{0, 0, 0, 0}};
-  GetSysctl("hw.cacheconfig", &CacheCounts);
-
-  struct {
-    std::string name;
-    std::string type;
-    int level;
-    size_t num_sharing;
-  } Cases[] = {{"hw.l1dcachesize", "Data", 1, CacheCounts[1]},
-               {"hw.l1icachesize", "Instruction", 1, CacheCounts[1]},
-               {"hw.l2cachesize", "Unified", 2, CacheCounts[2]},
-               {"hw.l3cachesize", "Unified", 3, CacheCounts[3]}};
-  for (auto& C : Cases) {
-    int val;
-    if (!GetSysctl(C.name, &val)) continue;
-    CPUInfo::CacheInfo info;
-    info.type = C.type;
-    info.level = C.level;
-    info.size = val;
-    info.num_sharing = static_cast<int>(C.num_sharing);
-    res.push_back(std::move(info));
-  }
-  return res;
-}
-#elif defined(BENCHMARK_OS_WINDOWS)
-std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
-  std::vector<CPUInfo::CacheInfo> res;
-  DWORD buffer_size = 0;
-  using PInfo = SYSTEM_LOGICAL_PROCESSOR_INFORMATION;
-  using CInfo = CACHE_DESCRIPTOR;
-
-  using UPtr = std::unique_ptr<PInfo, decltype(&std::free)>;
-  GetLogicalProcessorInformation(nullptr, &buffer_size);
-  UPtr buff((PInfo*)malloc(buffer_size), &std::free);
-  if (!GetLogicalProcessorInformation(buff.get(), &buffer_size))
-    PrintErrorAndDie("Failed during call to GetLogicalProcessorInformation: ",
-                     GetLastError());
-
-  PInfo* it = buff.get();
-  PInfo* end = buff.get() + (buffer_size / sizeof(PInfo));
-
-  for (; it != end; ++it) {
-    if (it->Relationship != RelationCache) continue;
-    using BitSet = std::bitset<sizeof(ULONG_PTR) * CHAR_BIT>;
-    BitSet B(it->ProcessorMask);
-    // To prevent duplicates, only consider caches where CPU 0 is specified
-    if (!B.test(0)) continue;
-    CInfo* Cache = &it->Cache;
-    CPUInfo::CacheInfo C;
-    C.num_sharing = static_cast<int>(B.count());
-    C.level = Cache->Level;
-    C.size = Cache->Size;
-    switch (Cache->Type) {
-      case CacheUnified:
-        C.type = "Unified";
-        break;
-      case CacheInstruction:
-        C.type = "Instruction";
-        break;
-      case CacheData:
-        C.type = "Data";
-        break;
-      case CacheTrace:
-        C.type = "Trace";
-        break;
-      default:
-        C.type = "Unknown";
-        break;
-    }
-    res.push_back(C);
-  }
-  return res;
-}
-#endif
-
-std::vector<CPUInfo::CacheInfo> GetCacheSizes() {
-#ifdef BENCHMARK_OS_MACOSX
-  return GetCacheSizesMacOSX();
-#elif defined(BENCHMARK_OS_WINDOWS)
-  return GetCacheSizesWindows();
-#else
-  return GetCacheSizesFromKVFS();
-#endif
-}
-
-int GetNumCPUs() {
-#ifdef BENCHMARK_HAS_SYSCTL
-  int NumCPU = -1;
-  if (GetSysctl("hw.ncpu", &NumCPU)) return NumCPU;
-  fprintf(stderr, "Err: %s\n", strerror(errno));
-  std::exit(EXIT_FAILURE);
-#elif defined(BENCHMARK_OS_WINDOWS)
-  SYSTEM_INFO sysinfo;
-  // Use memset as opposed to = {} to avoid GCC missing initializer false
-  // positives.
-  std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO));
-  GetSystemInfo(&sysinfo);
-  return sysinfo.dwNumberOfProcessors;  // number of logical
-                                        // processors in the current
-                                        // group
-#elif defined(BENCHMARK_OS_SOLARIS)
-  // Returns -1 in case of a failure.
-  int NumCPU = sysconf(_SC_NPROCESSORS_ONLN);
-  if (NumCPU < 0) {
-    fprintf(stderr,
-            "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n",
-            strerror(errno));
-  }
-  return NumCPU;
-#else
-  int NumCPUs = 0;
-  int MaxID = -1;
-  std::ifstream f("/proc/cpuinfo");
-  if (!f.is_open()) {
-    std::cerr << "failed to open /proc/cpuinfo\n";
-    return -1;
-  }
-  const std::string Key = "processor";
-  std::string ln;
-  while (std::getline(f, ln)) {
-    if (ln.empty()) continue;
-    size_t SplitIdx = ln.find(':');
-    std::string value;
-    if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
-    if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) {
-      NumCPUs++;
-      if (!value.empty()) {
-        int CurID = std::stoi(value);
-        MaxID = std::max(CurID, MaxID);
-      }
-    }
-  }
-  if (f.bad()) {
-    std::cerr << "Failure reading /proc/cpuinfo\n";
-    return -1;
-  }
-  if (!f.eof()) {
-    std::cerr << "Failed to read to end of /proc/cpuinfo\n";
-    return -1;
-  }
-  f.close();
-
-  if ((MaxID + 1) != NumCPUs) {
-    fprintf(stderr,
-            "CPU ID assignments in /proc/cpuinfo seem messed up."
-            " This is usually caused by a bad BIOS.\n");
-  }
-  return NumCPUs;
-#endif
-  BENCHMARK_UNREACHABLE();
-}
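GetNumCPUs above hand-parses /proc/cpuinfo and cross-checks processor IDs against the count. Where only a logical CPU count is needed, C++11 offers a portable one-liner; this is a hedged sketch, and note it may legitimately return 0 when the count cannot be determined, which is why the more involved parsing above remains more robust on Linux.

#include <iostream>
#include <thread>

int main() {
  // Number of concurrent threads the implementation supports; 0 if unknown.
  unsigned n = std::thread::hardware_concurrency();
  std::cout << "logical CPUs: " << n << '\n';
  return 0;
}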
-
-double GetCPUCyclesPerSecond() {
-#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
-  long freq;
-
-  // If the kernel is exporting the tsc frequency use that. There are issues
-  // where cpuinfo_max_freq cannot be relied on because the BIOS may be
-  // exporting an invalid p-state (on x86) or p-states may be used to put the
-  // processor in a new mode (turbo mode). Essentially, those frequencies
-  // cannot always be relied upon. The same reasons apply to /proc/cpuinfo as
-  // well.
-  if (ReadFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)
-      // If CPU scaling is in effect, we want to use the *maximum* frequency,
-      // not whatever CPU speed some random processor happens to be using now.
-      || ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
-                      &freq)) {
-    // The value is in kHz (as the file name suggests). For example, on a
-    // 2GHz warpstation, the file contains the value "2000000".
-    return freq * 1000.0;
-  }
-
-  const double error_value = -1;
-  double bogo_clock = error_value;
-
-  std::ifstream f("/proc/cpuinfo");
-  if (!f.is_open()) {
-    std::cerr << "failed to open /proc/cpuinfo\n";
-    return error_value;
-  }
-
-  auto startsWithKey = [](std::string const& Value, std::string const& Key) {
-    if (Key.size() > Value.size()) return false;
-    auto Cmp = [&](char X, char Y) {
-      return std::tolower(X) == std::tolower(Y);
-    };
-    return std::equal(Key.begin(), Key.end(), Value.begin(), Cmp);
-  };
-
-  std::string ln;
-  while (std::getline(f, ln)) {
-    if (ln.empty()) continue;
-    size_t SplitIdx = ln.find(':');
-    std::string value;
-    if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
-    // When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only
-    // accept positive values. Some environments (virtual machines) report zero,
-    // which would cause infinite looping in WallTime_Init.
-    if (startsWithKey(ln, "cpu MHz")) {
-      if (!value.empty()) {
-        double cycles_per_second = std::stod(value) * 1000000.0;
-        if (cycles_per_second > 0) return cycles_per_second;
-      }
-    } else if (startsWithKey(ln, "bogomips")) {
-      if (!value.empty()) {
-        bogo_clock = std::stod(value) * 1000000.0;
-        if (bogo_clock < 0.0) bogo_clock = error_value;
-      }
-    }
-  }
-  if (f.bad()) {
-    std::cerr << "Failure reading /proc/cpuinfo\n";
-    return error_value;
-  }
-  if (!f.eof()) {
-    std::cerr << "Failed to read to end of /proc/cpuinfo\n";
-    return error_value;
-  }
-  f.close();
-  // If we found the bogomips clock, but nothing better, we'll use it (but
-  // we're not happy about it); otherwise, fall back to the rough estimate
-  // below.
-  if (bogo_clock >= 0.0) return bogo_clock;
-
-#elif defined BENCHMARK_HAS_SYSCTL
-  constexpr auto* FreqStr =
-#if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD)
-      "machdep.tsc_freq";
-#elif defined BENCHMARK_OS_OPENBSD
-      "hw.cpuspeed";
-#else
-      "hw.cpufrequency";
-#endif
-  unsigned long long hz = 0;
-#if defined BENCHMARK_OS_OPENBSD
-  if (GetSysctl(FreqStr, &hz)) return hz * 1000000;
-#else
-  if (GetSysctl(FreqStr, &hz)) return hz;
-#endif
-  fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n",
-          FreqStr, strerror(errno));
-
-#elif defined BENCHMARK_OS_WINDOWS
-  // In NT, read MHz from the registry. If we fail to do so or we're in win9x
-  // then make a crude estimate.
-  DWORD data, data_size = sizeof(data);
-  if (IsWindowsXPOrGreater() &&
-      SUCCEEDED(
-          SHGetValueA(HKEY_LOCAL_MACHINE,
-                      "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
-                      "~MHz", nullptr, &data, &data_size)))
-    return static_cast<double>((int64_t)data *
-                               (int64_t)(1000 * 1000));  // was mhz
-#elif defined (BENCHMARK_OS_SOLARIS)
-  kstat_ctl_t *kc = kstat_open();
-  if (!kc) {
-    std::cerr << "failed to open /dev/kstat\n";
-    return -1;
-  }
-  kstat_t *ksp = kstat_lookup(kc, (char*)"cpu_info", -1, (char*)"cpu_info0");
-  if (!ksp) {
-    std::cerr << "failed to lookup in /dev/kstat\n";
-    return -1;
-  }
-  if (kstat_read(kc, ksp, NULL) < 0) {
-    std::cerr << "failed to read from /dev/kstat\n";
-    return -1;
-  }
-  kstat_named_t *knp =
-      (kstat_named_t*)kstat_data_lookup(ksp, (char*)"current_clock_Hz");
-  if (!knp) {
-    std::cerr << "failed to lookup data in /dev/kstat\n";
-    return -1;
-  }
-  if (knp->data_type != KSTAT_DATA_UINT64) {
-    std::cerr << "current_clock_Hz is of unexpected data type: "
-              << knp->data_type << "\n";
-    return -1;
-  }
-  double clock_hz = knp->value.ui64;
-  kstat_close(kc);
-  return clock_hz;
-#endif
-  // If we've fallen through, attempt to roughly estimate the CPU clock rate.
-  const int estimate_time_ms = 1000;
-  const auto start_ticks = cycleclock::Now();
-  SleepForMilliseconds(estimate_time_ms);
-  return static_cast<double>(cycleclock::Now() - start_ticks);
-}
-
-}  // end namespace
-
-const CPUInfo& CPUInfo::Get() {
-  static const CPUInfo* info = new CPUInfo();
-  return *info;
-}
-
-CPUInfo::CPUInfo()
-    : num_cpus(GetNumCPUs()),
-      cycles_per_second(GetCPUCyclesPerSecond()),
-      caches(GetCacheSizes()),
-      scaling_enabled(CpuScalingEnabled(num_cpus)) {}
-
-}  // end namespace benchmark
diff --git a/benchmarks/thirdparty/benchmark/src/thread_manager.h b/benchmarks/thirdparty/benchmark/src/thread_manager.h
deleted file mode 100755
index 82b4d72b62..0000000000
--- a/benchmarks/thirdparty/benchmark/src/thread_manager.h
+++ /dev/null
@@ -1,66 +0,0 @@
-#ifndef BENCHMARK_THREAD_MANAGER_H
-#define BENCHMARK_THREAD_MANAGER_H
-
-#include <atomic>
-
-#include "benchmark/benchmark.h"
-#include "mutex.h"
-
-namespace benchmark {
-namespace internal {
-
-class ThreadManager {
- public:
-  ThreadManager(int num_threads)
-      : alive_threads_(num_threads), start_stop_barrier_(num_threads) {}
-
-  Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
-    return benchmark_mutex_;
-  }
-
-  bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
-    return start_stop_barrier_.wait();
-  }
-
-  void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
-    start_stop_barrier_.removeThread();
-    if (--alive_threads_ == 0) {
-      MutexLock lock(end_cond_mutex_);
-      end_condition_.notify_all();
-    }
-  }
-
-  void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
-    MutexLock lock(end_cond_mutex_);
-    end_condition_.wait(lock.native_handle(),
-                        [this]() { return alive_threads_ == 0; });
-  }
-
- public:
-  struct Result {
-    int64_t iterations = 0;
-    double real_time_used = 0;
-    double cpu_time_used = 0;
-    double manual_time_used = 0;
-    int64_t bytes_processed = 0;
-    int64_t items_processed = 0;
-    int64_t complexity_n = 0;
-    std::string report_label_;
-    std::string error_message_;
-    bool has_error_ = false;
-    UserCounters counters;
-  };
-  GUARDED_BY(GetBenchmarkMutex()) Result results;
-
- private:
-  mutable Mutex benchmark_mutex_;
-  std::atomic<int> alive_threads_;
-  Barrier start_stop_barrier_;
-  Mutex end_cond_mutex_;
-  Condition end_condition_;
-};
-
-}  // namespace internal
-}  // namespace benchmark
-
-#endif  // BENCHMARK_THREAD_MANAGER_H
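NotifyThreadComplete/WaitForAllThreads above implement a count-down-and-signal shutdown: the last exiting thread notifies a condition variable whose waiters check an alive_threads_ == 0 predicate. Here is a self-contained sketch of that pattern with plain std::mutex and std::condition_variable; names are illustrative, and the real ThreadManager additionally layers a Barrier and Clang thread-safety annotations on top.

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

int main() {
  const int kThreads = 4;
  std::mutex m;
  std::condition_variable done;
  int alive = kThreads;

  std::vector<std::thread> pool;
  for (int i = 0; i < kThreads; ++i) {
    pool.emplace_back([&] {
      // ... per-thread benchmark work would run here ...
      std::lock_guard<std::mutex> lock(m);
      if (--alive == 0) done.notify_all();  // last thread out signals
    });
  }

  {
    std::unique_lock<std::mutex> lock(m);
    // Same predicate as WaitForAllThreads(); the lock guards 'alive'.
    done.wait(lock, [&] { return alive == 0; });
  }
  for (auto& t : pool) t.join();
  std::cout << "all threads finished\n";
  return 0;
}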
diff --git a/benchmarks/thirdparty/benchmark/src/thread_timer.h b/benchmarks/thirdparty/benchmark/src/thread_timer.h
deleted file mode 100755
index eaf108e017..0000000000
--- a/benchmarks/thirdparty/benchmark/src/thread_timer.h
+++ /dev/null
@@ -1,69 +0,0 @@
-#ifndef BENCHMARK_THREAD_TIMER_H
-#define BENCHMARK_THREAD_TIMER_H
-
-#include "check.h"
-#include "timers.h"
-
-namespace benchmark {
-namespace internal {
-
-class ThreadTimer {
- public:
-  ThreadTimer() = default;
-
-  // Called by each thread
-  void StartTimer() {
-    running_ = true;
-    start_real_time_ = ChronoClockNow();
-    start_cpu_time_ = ThreadCPUUsage();
-  }
-
-  // Called by each thread
-  void StopTimer() {
-    CHECK(running_);
-    running_ = false;
-    real_time_used_ += ChronoClockNow() - start_real_time_;
-    // Floating point error can result in the subtraction producing a negative
-    // time. Guard against that.
-    cpu_time_used_ += std::max<double>(ThreadCPUUsage() - start_cpu_time_, 0);
-  }
-
-  // Called by each thread
-  void SetIterationTime(double seconds) { manual_time_used_ += seconds; }
-
-  bool running() const { return running_; }
-
-  // REQUIRES: timer is not running
-  double real_time_used() {
-    CHECK(!running_);
-    return real_time_used_;
-  }
-
-  // REQUIRES: timer is not running
-  double cpu_time_used() {
-    CHECK(!running_);
-    return cpu_time_used_;
-  }
-
-  // REQUIRES: timer is not running
-  double manual_time_used() {
-    CHECK(!running_);
-    return manual_time_used_;
-  }
-
- private:
-  bool running_ = false;        // Is the timer running
-  double start_real_time_ = 0;  // If running_
-  double start_cpu_time_ = 0;   // If running_
-
-  // Accumulated time so far (does not contain current slice if running_)
-  double real_time_used_ = 0;
-  double cpu_time_used_ = 0;
-  // Manually set iteration time. User sets this with SetIterationTime(seconds).
-  double manual_time_used_ = 0;
-};
-
-}  // namespace internal
-}  // namespace benchmark
-
-#endif  // BENCHMARK_THREAD_TIMER_H
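ThreadTimer::StopTimer above clamps each CPU-time slice with std::max<double>(..., 0) because two closely spaced ThreadCPUUsage() reads can differ by a tiny negative amount through floating-point error. A condensed sketch of the same accumulate-with-clamp bookkeeping, applied to wall time via std::chrono; StopwatchSketch is an illustrative name, not part of the library.

#include <algorithm>
#include <chrono>

class StopwatchSketch {
  using Clock = std::chrono::steady_clock;

 public:
  void Start() { begin_ = Clock::now(); }

  void Stop() {
    const double slice =
        std::chrono::duration<double>(Clock::now() - begin_).count();
    // Same guard as ThreadTimer::StopTimer(): clamp tiny negative slices.
    elapsed_ += std::max<double>(slice, 0);
  }

  double seconds() const { return elapsed_; }  // accumulated across slices

 private:
  Clock::time_point begin_;
  double elapsed_ = 0;
};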
diff --git a/benchmarks/thirdparty/benchmark/src/timers.cc b/benchmarks/thirdparty/benchmark/src/timers.cc
deleted file mode 100755
index 2010e2450b..0000000000
--- a/benchmarks/thirdparty/benchmark/src/timers.cc
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "timers.h"
-#include "internal_macros.h"
-
-#ifdef BENCHMARK_OS_WINDOWS
-#include <Shlwapi.h>
-#undef StrCat  // Don't let StrCat in string_util.h be renamed to lstrcatA
-#include <VersionHelpers.h>
-#include <Windows.h>
-#else
-#include <fcntl.h>
-#ifndef BENCHMARK_OS_FUCHSIA
-#include <sys/resource.h>
-#endif
-#include <sys/time.h>
-#include <sys/types.h>  // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
-#include <unistd.h>
-#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX
-#include <sys/sysctl.h>
-#endif
-#if defined(BENCHMARK_OS_MACOSX)
-#include <mach/mach_init.h>
-#include <mach/mach_port.h>
-#include <mach/thread_act.h>
-#endif
-#endif
-
-#ifdef BENCHMARK_OS_EMSCRIPTEN
-#include <emscripten.h>
-#endif
-
-#include <cerrno>
-#include <cstdint>
-#include <cstdio>
-#include <cstdlib>
-#include <cstring>
-#include <ctime>
-#include <iostream>
-#include <limits>
-#include <mutex>
-
-#include "check.h"
-#include "log.h"
-#include "sleep.h"
-#include "string_util.h"
-
-namespace benchmark {
-
-// Suppress unused warnings on helper functions.
-#if defined(__GNUC__)
-#pragma GCC diagnostic ignored "-Wunused-function"
-#endif
-
-namespace {
-#if defined(BENCHMARK_OS_WINDOWS)
-double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) {
-  ULARGE_INTEGER kernel;
-  ULARGE_INTEGER user;
-  kernel.HighPart = kernel_time.dwHighDateTime;
-  kernel.LowPart = kernel_time.dwLowDateTime;
-  user.HighPart = user_time.dwHighDateTime;
-  user.LowPart = user_time.dwLowDateTime;
-  return (static_cast<double>(kernel.QuadPart) +
-          static_cast<double>(user.QuadPart)) *
-         1e-7;
-}
-#elif !defined(BENCHMARK_OS_FUCHSIA)
-double MakeTime(struct rusage const& ru) {
-  return (static_cast<double>(ru.ru_utime.tv_sec) +
-          static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
-          static_cast<double>(ru.ru_stime.tv_sec) +
-          static_cast<double>(ru.ru_stime.tv_usec) * 1e-6);
-}
-#endif
-#if defined(BENCHMARK_OS_MACOSX)
-double MakeTime(thread_basic_info_data_t const& info) {
-  return (static_cast<double>(info.user_time.seconds) +
-          static_cast<double>(info.user_time.microseconds) * 1e-6 +
-          static_cast<double>(info.system_time.seconds) +
-          static_cast<double>(info.system_time.microseconds) * 1e-6);
-}
-#endif
-#if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID)
-double MakeTime(struct timespec const& ts) {
-  return ts.tv_sec + (static_cast<double>(ts.tv_nsec) * 1e-9);
-}
-#endif
-
-BENCHMARK_NORETURN static void DiagnoseAndExit(const char* msg) {
-  std::cerr << "ERROR: " << msg << std::endl;
-  std::exit(EXIT_FAILURE);
-}
-
-}  // end namespace
-
-double ProcessCPUUsage() {
-#if defined(BENCHMARK_OS_WINDOWS)
-  HANDLE proc = GetCurrentProcess();
-  FILETIME creation_time;
-  FILETIME exit_time;
-  FILETIME kernel_time;
-  FILETIME user_time;
-  if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time,
-                      &user_time))
-    return MakeTime(kernel_time, user_time);
-  DiagnoseAndExit("GetProcessTimes() failed");
-#elif defined(BENCHMARK_OS_EMSCRIPTEN)
-  // clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten.
-  // Use the Emscripten-specific API. The reported CPU time will be exactly
-  // the same as the total time, but this is OK because there are no
-  // long-latency synchronous system calls in Emscripten.
-  return emscripten_get_now() * 1e-3;
-#elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX)
-  // FIXME We want to use clock_gettime, but it's not available in MacOS 10.11.
-  // See https://github.com/google/benchmark/pull/292
-  struct timespec spec;
-  if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0)
-    return MakeTime(spec);
-  DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed");
-#else
-  struct rusage ru;
-  if (getrusage(RUSAGE_SELF, &ru) == 0) return MakeTime(ru);
-  DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) failed");
failed"); -#endif -} - -double ThreadCPUUsage() { -#if defined(BENCHMARK_OS_WINDOWS) - HANDLE this_thread = GetCurrentThread(); - FILETIME creation_time; - FILETIME exit_time; - FILETIME kernel_time; - FILETIME user_time; - GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time, - &user_time); - return MakeTime(kernel_time, user_time); -#elif defined(BENCHMARK_OS_MACOSX) - // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. See - // https://github.com/google/benchmark/pull/292 - mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT; - thread_basic_info_data_t info; - mach_port_t thread = pthread_mach_thread_np(pthread_self()); - if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) == - KERN_SUCCESS) { - return MakeTime(info); - } - DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info"); -#elif defined(BENCHMARK_OS_EMSCRIPTEN) - // Emscripten doesn't support traditional threads - return ProcessCPUUsage(); -#elif defined(BENCHMARK_OS_RTEMS) - // RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See - // https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c - return ProcessCPUUsage(); -#elif defined(BENCHMARK_OS_SOLARIS) - struct rusage ru; - if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru); - DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed"); -#elif defined(CLOCK_THREAD_CPUTIME_ID) - struct timespec ts; - if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts); - DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed"); -#else -#error Per-thread timing is not available on your system. -#endif -} - -namespace { - -std::string DateTimeString(bool local) { - typedef std::chrono::system_clock Clock; - std::time_t now = Clock::to_time_t(Clock::now()); - const std::size_t kStorageSize = 128; - char storage[kStorageSize]; - std::size_t written; - - if (local) { -#if defined(BENCHMARK_OS_WINDOWS) - written = - std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now)); -#else - std::tm timeinfo; - ::localtime_r(&now, &timeinfo); - written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo); -#endif - } else { -#if defined(BENCHMARK_OS_WINDOWS) - written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now)); -#else - std::tm timeinfo; - ::gmtime_r(&now, &timeinfo); - written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo); -#endif - } - CHECK(written < kStorageSize); - ((void)written); // prevent unused variable in optimized mode. 
-  return std::string(storage);
-}
-
-}  // end namespace
-
-std::string LocalDateTimeString() { return DateTimeString(true); }
-
-}  // end namespace benchmark
diff --git a/benchmarks/thirdparty/benchmark/src/timers.h b/benchmarks/thirdparty/benchmark/src/timers.h
deleted file mode 100755
index 65606ccd93..0000000000
--- a/benchmarks/thirdparty/benchmark/src/timers.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#ifndef BENCHMARK_TIMERS_H
-#define BENCHMARK_TIMERS_H
-
-#include <chrono>
-#include <string>
-
-namespace benchmark {
-
-// Return the CPU usage of the current process
-double ProcessCPUUsage();
-
-// Return the CPU usage of the children of the current process
-double ChildrenCPUUsage();
-
-// Return the CPU usage of the current thread
-double ThreadCPUUsage();
-
-#if defined(HAVE_STEADY_CLOCK)
-template <bool HighResIsSteady = std::chrono::high_resolution_clock::is_steady>
-struct ChooseSteadyClock {
-  typedef std::chrono::high_resolution_clock type;
-};
-
-template <>
-struct ChooseSteadyClock<false> {
-  typedef std::chrono::steady_clock type;
-};
-#endif
-
-struct ChooseClockType {
-#if defined(HAVE_STEADY_CLOCK)
-  typedef ChooseSteadyClock<>::type type;
-#else
-  typedef std::chrono::high_resolution_clock type;
-#endif
-};
-
-inline double ChronoClockNow() {
-  typedef ChooseClockType::type ClockType;
-  using FpSeconds = std::chrono::duration<double, std::chrono::seconds::period>;
-  return FpSeconds(ClockType::now().time_since_epoch()).count();
-}
-
-std::string LocalDateTimeString();
-
-}  // end namespace benchmark
-
-#endif  // BENCHMARK_TIMERS_H
diff --git a/benchmarks/thirdparty/benchmark/tools/compare.py b/benchmarks/thirdparty/benchmark/tools/compare.py
deleted file mode 100755
index f0a4455f5f..0000000000
--- a/benchmarks/thirdparty/benchmark/tools/compare.py
+++ /dev/null
@@ -1,316 +0,0 @@
-#!/usr/bin/env python
-
-"""
-compare.py - versatile benchmark output compare tool
-"""
-
-import argparse
-from argparse import ArgumentParser
-import sys
-import gbench
-from gbench import util, report
-from gbench.util import *
-
-
-def check_inputs(in1, in2, flags):
-    """
-    Perform checking on the user provided inputs and diagnose any abnormalities
-    """
-    in1_kind, in1_err = classify_input_file(in1)
-    in2_kind, in2_err = classify_input_file(in2)
-    output_file = find_benchmark_flag('--benchmark_out=', flags)
-    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
-    if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
-        print(("WARNING: '--benchmark_out=%s' will be passed to both "
-               "benchmarks, causing it to be overwritten") % output_file)
-    if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
-        print("WARNING: passing optional flags has no effect since both "
-              "inputs are JSON")
-    if output_type is not None and output_type != 'json':
-        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`"
-               " is not supported.") % output_type)
-        sys.exit(1)
-
-
-def create_parser():
-    parser = ArgumentParser(
-        description='versatile benchmark output compare tool')
-    subparsers = parser.add_subparsers(
-        help='This tool has multiple modes of operation:',
-        dest='mode')
-
-    parser_a = subparsers.add_parser(
-        'benchmarks',
-        help='The simplest use case: compare all the output of the two benchmarks')
-    baseline = parser_a.add_argument_group(
-        'baseline', 'The benchmark baseline')
-    baseline.add_argument(
-        'test_baseline',
-        metavar='test_baseline',
-        type=argparse.FileType('r'),
-        nargs=1,
-        help='A benchmark executable or JSON output file')
-    contender = parser_a.add_argument_group(
-        'contender', 'The benchmark that will be compared against the baseline')
-    contender.add_argument(
-        'test_contender',
-        metavar='test_contender',
-        type=argparse.FileType('r'),
-        nargs=1,
-        help='A benchmark executable or JSON output file')
-    parser_a.add_argument(
-        'benchmark_options',
-        metavar='benchmark_options',
-        nargs=argparse.REMAINDER,
-        help='Arguments to pass when running benchmark executables')
-
-    parser_b = subparsers.add_parser(
-        'filters', help='Compare one filter of a benchmark against another filter of the same benchmark')
-    baseline = parser_b.add_argument_group(
-        'baseline', 'The benchmark baseline')
-    baseline.add_argument(
-        'test',
-        metavar='test',
-        type=argparse.FileType('r'),
-        nargs=1,
-        help='A benchmark executable or JSON output file')
-    baseline.add_argument(
-        'filter_baseline',
-        metavar='filter_baseline',
-        type=str,
-        nargs=1,
-        help='The first filter, that will be used as baseline')
-    contender = parser_b.add_argument_group(
-        'contender', 'The benchmark that will be compared against the baseline')
-    contender.add_argument(
-        'filter_contender',
-        metavar='filter_contender',
-        type=str,
-        nargs=1,
-        help='The second filter, that will be compared against the baseline')
-    parser_b.add_argument(
-        'benchmark_options',
-        metavar='benchmark_options',
-        nargs=argparse.REMAINDER,
-        help='Arguments to pass when running benchmark executables')
-
-    parser_c = subparsers.add_parser(
-        'benchmarksfiltered',
-        help='Compare one filter of the first benchmark with another filter of the second benchmark')
-    baseline = parser_c.add_argument_group(
-        'baseline', 'The benchmark baseline')
-    baseline.add_argument(
-        'test_baseline',
-        metavar='test_baseline',
-        type=argparse.FileType('r'),
-        nargs=1,
-        help='A benchmark executable or JSON output file')
-    baseline.add_argument(
-        'filter_baseline',
-        metavar='filter_baseline',
-        type=str,
-        nargs=1,
-        help='The first filter, that will be used as baseline')
-    contender = parser_c.add_argument_group(
-        'contender', 'The benchmark that will be compared against the baseline')
-    contender.add_argument(
-        'test_contender',
-        metavar='test_contender',
-        type=argparse.FileType('r'),
-        nargs=1,
-        help='The second benchmark executable or JSON output file, that will be compared against the baseline')
-    contender.add_argument(
-        'filter_contender',
-        metavar='filter_contender',
-        type=str,
-        nargs=1,
-        help='The second filter, that will be compared against the baseline')
-    parser_c.add_argument(
-        'benchmark_options',
-        metavar='benchmark_options',
-        nargs=argparse.REMAINDER,
-        help='Arguments to pass when running benchmark executables')
-
-    return parser
-
-
-def main():
-    # Parse the command line flags
-    parser = create_parser()
-    args, unknown_args = parser.parse_known_args()
-    if args.mode is None:
-        parser.print_help()
-        exit(1)
-    assert not unknown_args
-    benchmark_options = args.benchmark_options
-
-    if args.mode == 'benchmarks':
-        test_baseline = args.test_baseline[0].name
-        test_contender = args.test_contender[0].name
-        filter_baseline = ''
-        filter_contender = ''
-
-        # NOTE: if test_baseline == test_contender, you are analyzing the stdev
-
-        description = 'Comparing %s to %s' % (test_baseline, test_contender)
-    elif args.mode == 'filters':
-        test_baseline = args.test[0].name
-        test_contender = args.test[0].name
-        filter_baseline = args.filter_baseline[0]
-        filter_contender = args.filter_contender[0]
-
-        # NOTE: if filter_baseline == filter_contender, you are analyzing the
-        # stdev
-
-        description = 'Comparing %s to %s (from %s)' % (
-            filter_baseline, filter_contender, args.test[0].name)
-    elif args.mode == 'benchmarksfiltered':
- test_baseline = args.test_baseline[0].name - test_contender = args.test_contender[0].name - filter_baseline = args.filter_baseline[0] - filter_contender = args.filter_contender[0] - - # NOTE: if test_baseline == test_contender and - # filter_baseline == filter_contender, you are analyzing the stdev - - description = 'Comparing %s (from %s) to %s (from %s)' % ( - filter_baseline, test_baseline, filter_contender, test_contender) - else: - # should never happen - print("Unrecognized mode of operation: '%s'" % args.mode) - parser.print_help() - exit(1) - - check_inputs(test_baseline, test_contender, benchmark_options) - - options_baseline = [] - options_contender = [] - - if filter_baseline and filter_contender: - options_baseline = ['--benchmark_filter=%s' % filter_baseline] - options_contender = ['--benchmark_filter=%s' % filter_contender] - - # Run the benchmarks and report the results - json1 = json1_orig = gbench.util.run_or_load_benchmark( - test_baseline, benchmark_options + options_baseline) - json2 = json2_orig = gbench.util.run_or_load_benchmark( - test_contender, benchmark_options + options_contender) - - # Now, filter the benchmarks so that the difference report can work - if filter_baseline and filter_contender: - replacement = '[%s vs. %s]' % (filter_baseline, filter_contender) - json1 = gbench.report.filter_benchmark( - json1_orig, filter_baseline, replacement) - json2 = gbench.report.filter_benchmark( - json2_orig, filter_contender, replacement) - - # Diff and output - output_lines = gbench.report.generate_difference_report(json1, json2) - print(description) - for ln in output_lines: - print(ln) - - -import unittest - - -class TestParser(unittest.TestCase): - def setUp(self): - self.parser = create_parser() - testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'gbench', - 'Inputs') - self.testInput0 = os.path.join(testInputs, 'test1_run1.json') - self.testInput1 = os.path.join(testInputs, 'test1_run2.json') - - def test_benchmarks_basic(self): - parsed = self.parser.parse_args( - ['benchmarks', self.testInput0, self.testInput1]) - self.assertEqual(parsed.mode, 'benchmarks') - self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertFalse(parsed.benchmark_options) - - def test_benchmarks_with_remainder(self): - parsed = self.parser.parse_args( - ['benchmarks', self.testInput0, self.testInput1, 'd']) - self.assertEqual(parsed.mode, 'benchmarks') - self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.benchmark_options, ['d']) - - def test_benchmarks_with_remainder_after_doubleminus(self): - parsed = self.parser.parse_args( - ['benchmarks', self.testInput0, self.testInput1, '--', 'e']) - self.assertEqual(parsed.mode, 'benchmarks') - self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.benchmark_options, ['e']) - - def test_filters_basic(self): - parsed = self.parser.parse_args( - ['filters', self.testInput0, 'c', 'd']) - self.assertEqual(parsed.mode, 'filters') - self.assertEqual(parsed.test[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.filter_contender[0], 'd') - self.assertFalse(parsed.benchmark_options) - - def test_filters_with_remainder(self): - parsed = self.parser.parse_args( - ['filters', 
self.testInput0, 'c', 'd', 'e']) - self.assertEqual(parsed.mode, 'filters') - self.assertEqual(parsed.test[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.filter_contender[0], 'd') - self.assertEqual(parsed.benchmark_options, ['e']) - - def test_filters_with_remainder_after_doubleminus(self): - parsed = self.parser.parse_args( - ['filters', self.testInput0, 'c', 'd', '--', 'f']) - self.assertEqual(parsed.mode, 'filters') - self.assertEqual(parsed.test[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.filter_contender[0], 'd') - self.assertEqual(parsed.benchmark_options, ['f']) - - def test_benchmarksfiltered_basic(self): - parsed = self.parser.parse_args( - ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e']) - self.assertEqual(parsed.mode, 'benchmarksfiltered') - self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.filter_contender[0], 'e') - self.assertFalse(parsed.benchmark_options) - - def test_benchmarksfiltered_with_remainder(self): - parsed = self.parser.parse_args( - ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f']) - self.assertEqual(parsed.mode, 'benchmarksfiltered') - self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.filter_contender[0], 'e') - self.assertEqual(parsed.benchmark_options[0], 'f') - - def test_benchmarksfiltered_with_remainder_after_doubleminus(self): - parsed = self.parser.parse_args( - ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g']) - self.assertEqual(parsed.mode, 'benchmarksfiltered') - self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.filter_contender[0], 'e') - self.assertEqual(parsed.benchmark_options[0], 'g') - - -if __name__ == '__main__': - # unittest.main() - main() - -# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 -# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; -# kate: indent-mode python; remove-trailing-spaces modified; diff --git a/benchmarks/thirdparty/benchmark/tools/compare_bench.py b/benchmarks/thirdparty/benchmark/tools/compare_bench.py deleted file mode 100755 index 7bbf0d0157..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/compare_bench.py +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env python -""" -compare_bench.py - Compare two benchmarks or their results and report the - difference. 
-""" -import argparse -from argparse import ArgumentParser -import sys -import gbench -from gbench import util, report -from gbench.util import * - -def check_inputs(in1, in2, flags): - """ - Perform checking on the user provided inputs and diagnose any abnormalities - """ - in1_kind, in1_err = classify_input_file(in1) - in2_kind, in2_err = classify_input_file(in2) - output_file = find_benchmark_flag('--benchmark_out=', flags) - output_type = find_benchmark_flag('--benchmark_out_format=', flags) - if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file: - print(("WARNING: '--benchmark_out=%s' will be passed to both " - "benchmarks causing it to be overwritten") % output_file) - if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0: - print("WARNING: passing --benchmark flags has no effect since both " - "inputs are JSON") - if output_type is not None and output_type != 'json': - print(("ERROR: passing '--benchmark_out_format=%s' to 'compare_bench.py`" - " is not supported.") % output_type) - sys.exit(1) - - -def main(): - parser = ArgumentParser( - description='compare the results of two benchmarks') - parser.add_argument( - 'test1', metavar='test1', type=str, nargs=1, - help='A benchmark executable or JSON output file') - parser.add_argument( - 'test2', metavar='test2', type=str, nargs=1, - help='A benchmark executable or JSON output file') - parser.add_argument( - 'benchmark_options', metavar='benchmark_options', nargs=argparse.REMAINDER, - help='Arguments to pass when running benchmark executables' - ) - args, unknown_args = parser.parse_known_args() - # Parse the command line flags - test1 = args.test1[0] - test2 = args.test2[0] - if unknown_args: - # should never happen - print("Unrecognized positional argument arguments: '%s'" - % unknown_args) - exit(1) - benchmark_options = args.benchmark_options - check_inputs(test1, test2, benchmark_options) - # Run the benchmarks and report the results - json1 = gbench.util.run_or_load_benchmark(test1, benchmark_options) - json2 = gbench.util.run_or_load_benchmark(test2, benchmark_options) - output_lines = gbench.report.generate_difference_report(json1, json2) - print('Comparing %s to %s' % (test1, test2)) - for ln in output_lines: - print(ln) - - -if __name__ == '__main__': - main() diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run1.json b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run1.json deleted file mode 100755 index d7ec6a9c8f..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run1.json +++ /dev/null @@ -1,102 +0,0 @@ -{ - "context": { - "date": "2016-08-02 17:44:46", - "num_cpus": 4, - "mhz_per_cpu": 4228, - "cpu_scaling_enabled": false, - "library_build_type": "release" - }, - "benchmarks": [ - { - "name": "BM_SameTimes", - "iterations": 1000, - "real_time": 10, - "cpu_time": 10, - "time_unit": "ns" - }, - { - "name": "BM_2xFaster", - "iterations": 1000, - "real_time": 50, - "cpu_time": 50, - "time_unit": "ns" - }, - { - "name": "BM_2xSlower", - "iterations": 1000, - "real_time": 50, - "cpu_time": 50, - "time_unit": "ns" - }, - { - "name": "BM_1PercentFaster", - "iterations": 1000, - "real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_1PercentSlower", - "iterations": 1000, - "real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_10PercentFaster", - "iterations": 1000, - "real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_10PercentSlower", - "iterations": 1000, - 
"real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_100xSlower", - "iterations": 1000, - "real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_100xFaster", - "iterations": 1000, - "real_time": 10000, - "cpu_time": 10000, - "time_unit": "ns" - }, - { - "name": "BM_10PercentCPUToTime", - "iterations": 1000, - "real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_ThirdFaster", - "iterations": 1000, - "real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_BadTimeUnit", - "iterations": 1000, - "real_time": 0.4, - "cpu_time": 0.5, - "time_unit": "s" - }, - { - "name": "BM_DifferentTimeUnit", - "iterations": 1, - "real_time": 1, - "cpu_time": 1, - "time_unit": "s" - } - ] -} diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run2.json b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run2.json deleted file mode 100755 index 59a5ffaca4..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run2.json +++ /dev/null @@ -1,102 +0,0 @@ -{ - "context": { - "date": "2016-08-02 17:44:46", - "num_cpus": 4, - "mhz_per_cpu": 4228, - "cpu_scaling_enabled": false, - "library_build_type": "release" - }, - "benchmarks": [ - { - "name": "BM_SameTimes", - "iterations": 1000, - "real_time": 10, - "cpu_time": 10, - "time_unit": "ns" - }, - { - "name": "BM_2xFaster", - "iterations": 1000, - "real_time": 25, - "cpu_time": 25, - "time_unit": "ns" - }, - { - "name": "BM_2xSlower", - "iterations": 20833333, - "real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_1PercentFaster", - "iterations": 1000, - "real_time": 98.9999999, - "cpu_time": 98.9999999, - "time_unit": "ns" - }, - { - "name": "BM_1PercentSlower", - "iterations": 1000, - "real_time": 100.9999999, - "cpu_time": 100.9999999, - "time_unit": "ns" - }, - { - "name": "BM_10PercentFaster", - "iterations": 1000, - "real_time": 90, - "cpu_time": 90, - "time_unit": "ns" - }, - { - "name": "BM_10PercentSlower", - "iterations": 1000, - "real_time": 110, - "cpu_time": 110, - "time_unit": "ns" - }, - { - "name": "BM_100xSlower", - "iterations": 1000, - "real_time": 1.0000e+04, - "cpu_time": 1.0000e+04, - "time_unit": "ns" - }, - { - "name": "BM_100xFaster", - "iterations": 1000, - "real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_10PercentCPUToTime", - "iterations": 1000, - "real_time": 110, - "cpu_time": 90, - "time_unit": "ns" - }, - { - "name": "BM_ThirdFaster", - "iterations": 1000, - "real_time": 66.665, - "cpu_time": 66.664, - "time_unit": "ns" - }, - { - "name": "BM_BadTimeUnit", - "iterations": 1000, - "real_time": 0.04, - "cpu_time": 0.6, - "time_unit": "s" - }, - { - "name": "BM_DifferentTimeUnit", - "iterations": 1, - "real_time": 1, - "cpu_time": 1, - "time_unit": "ns" - } - ] -} diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test2_run.json b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test2_run.json deleted file mode 100755 index 15bc698030..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test2_run.json +++ /dev/null @@ -1,81 +0,0 @@ -{ - "context": { - "date": "2016-08-02 17:44:46", - "num_cpus": 4, - "mhz_per_cpu": 4228, - "cpu_scaling_enabled": false, - "library_build_type": "release" - }, - "benchmarks": [ - { - "name": "BM_Hi", - "iterations": 1234, - "real_time": 42, - "cpu_time": 24, - "time_unit": "ms" - }, - { - "name": "BM_Zero", - "iterations": 1000, - "real_time": 10, - "cpu_time": 10, - 
"time_unit": "ns" - }, - { - "name": "BM_Zero/4", - "iterations": 4000, - "real_time": 40, - "cpu_time": 40, - "time_unit": "ns" - }, - { - "name": "Prefix/BM_Zero", - "iterations": 2000, - "real_time": 20, - "cpu_time": 20, - "time_unit": "ns" - }, - { - "name": "Prefix/BM_Zero/3", - "iterations": 3000, - "real_time": 30, - "cpu_time": 30, - "time_unit": "ns" - }, - { - "name": "BM_One", - "iterations": 5000, - "real_time": 5, - "cpu_time": 5, - "time_unit": "ns" - }, - { - "name": "BM_One/4", - "iterations": 2000, - "real_time": 20, - "cpu_time": 20, - "time_unit": "ns" - }, - { - "name": "Prefix/BM_One", - "iterations": 1000, - "real_time": 10, - "cpu_time": 10, - "time_unit": "ns" - }, - { - "name": "Prefix/BM_One/3", - "iterations": 1500, - "real_time": 15, - "cpu_time": 15, - "time_unit": "ns" - }, - { - "name": "BM_Bye", - "iterations": 5321, - "real_time": 11, - "cpu_time": 63, - "time_unit": "ns" - } - ] -} diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/__init__.py b/benchmarks/thirdparty/benchmark/tools/gbench/__init__.py deleted file mode 100755 index fce1a1acfb..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/gbench/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Google Benchmark tooling""" - -__author__ = 'Eric Fiselier' -__email__ = 'eric@efcs.ca' -__versioninfo__ = (0, 5, 0) -__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev' - -__all__ = [] diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/report.py b/benchmarks/thirdparty/benchmark/tools/gbench/report.py deleted file mode 100755 index 0c090981a8..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/gbench/report.py +++ /dev/null @@ -1,208 +0,0 @@ -"""report.py - Utilities for reporting statistics about benchmark results -""" -import os -import re -import copy - -class BenchmarkColor(object): - def __init__(self, name, code): - self.name = name - self.code = code - - def __repr__(self): - return '%s%r' % (self.__class__.__name__, - (self.name, self.code)) - - def __format__(self, format): - return self.code - -# Benchmark Colors Enumeration -BC_NONE = BenchmarkColor('NONE', '') -BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m') -BC_CYAN = BenchmarkColor('CYAN', '\033[96m') -BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m') -BC_HEADER = BenchmarkColor('HEADER', '\033[92m') -BC_WARNING = BenchmarkColor('WARNING', '\033[93m') -BC_WHITE = BenchmarkColor('WHITE', '\033[97m') -BC_FAIL = BenchmarkColor('FAIL', '\033[91m') -BC_ENDC = BenchmarkColor('ENDC', '\033[0m') -BC_BOLD = BenchmarkColor('BOLD', '\033[1m') -BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m') - -def color_format(use_color, fmt_str, *args, **kwargs): - """ - Return the result of 'fmt_str.format(*args, **kwargs)' after transforming - 'args' and 'kwargs' according to the value of 'use_color'. If 'use_color' - is False then all color codes in 'args' and 'kwargs' are replaced with - the empty string. 
- """ - assert use_color is True or use_color is False - if not use_color: - args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE - for arg in args] - kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE - for key, arg in kwargs.items()} - return fmt_str.format(*args, **kwargs) - - -def find_longest_name(benchmark_list): - """ - Return the length of the longest benchmark name in a given list of - benchmark JSON objects - """ - longest_name = 1 - for bc in benchmark_list: - if len(bc['name']) > longest_name: - longest_name = len(bc['name']) - return longest_name - - -def calculate_change(old_val, new_val): - """ - Return a float representing the decimal change between old_val and new_val. - """ - if old_val == 0 and new_val == 0: - return 0.0 - if old_val == 0: - return float(new_val - old_val) / (float(old_val + new_val) / 2) - return float(new_val - old_val) / abs(old_val) - - -def filter_benchmark(json_orig, family, replacement=""): - """ - Apply a filter to the json, and only leave the 'family' of benchmarks. - """ - regex = re.compile(family) - filtered = {} - filtered['benchmarks'] = [] - for be in json_orig['benchmarks']: - if not regex.search(be['name']): - continue - filteredbench = copy.deepcopy(be) # Do NOT modify the old name! - filteredbench['name'] = regex.sub(replacement, filteredbench['name']) - filtered['benchmarks'].append(filteredbench) - return filtered - - -def generate_difference_report(json1, json2, use_color=True): - """ - Calculate and report the difference between each test of two benchmarks - runs specified as 'json1' and 'json2'. - """ - first_col_width = find_longest_name(json1['benchmarks']) - def find_test(name): - for b in json2['benchmarks']: - if b['name'] == name: - return b - return None - first_col_width = max(first_col_width, len('Benchmark')) - first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format( - 'Benchmark', 12 + first_col_width) - output_strs = [first_line, '-' * len(first_line)] - - gen = (bn for bn in json1['benchmarks'] if 'real_time' in bn and 'cpu_time' in bn) - for bn in gen: - other_bench = find_test(bn['name']) - if not other_bench: - continue - - if bn['time_unit'] != other_bench['time_unit']: - continue - - def get_color(res): - if res > 0.05: - return BC_FAIL - elif res > -0.07: - return BC_WHITE - else: - return BC_CYAN - fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}" - tres = calculate_change(bn['real_time'], other_bench['real_time']) - cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time']) - output_strs += [color_format(use_color, fmt_str, - BC_HEADER, bn['name'], first_col_width, - get_color(tres), tres, get_color(cpures), cpures, - bn['real_time'], other_bench['real_time'], - bn['cpu_time'], other_bench['cpu_time'], - endc=BC_ENDC)] - return output_strs - -############################################################################### -# Unit tests - -import unittest - -class TestReportDifference(unittest.TestCase): - def load_results(self): - import json - testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs') - testOutput1 = os.path.join(testInputs, 'test1_run1.json') - testOutput2 = os.path.join(testInputs, 'test1_run2.json') - with open(testOutput1, 'r') as f: - json1 = json.load(f) - with open(testOutput2, 'r') as f: - json2 = json.load(f) - return json1, json2 - - def test_basic(self): - expect_lines = [ - ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'], - ['BM_2xFaster', 
'-0.5000', '-0.5000', '50', '25', '50', '25'], - ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'], - ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'], - ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'], - ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'], - ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'], - ['BM_100xSlower', '+99.0000', '+99.0000', '100', '10000', '100', '10000'], - ['BM_100xFaster', '-0.9900', '-0.9900', '10000', '100', '10000', '100'], - ['BM_10PercentCPUToTime', '+0.1000', '-0.1000', '100', '110', '100', '90'], - ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'], - ['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'], - ] - json1, json2 = self.load_results() - output_lines_with_header = generate_difference_report(json1, json2, use_color=False) - output_lines = output_lines_with_header[2:] - print("\n".join(output_lines_with_header)) - self.assertEqual(len(output_lines), len(expect_lines)) - for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(' ') if x] - self.assertEqual(len(parts), 7) - self.assertEqual(parts, expect_lines[i]) - - -class TestReportDifferenceBetweenFamilies(unittest.TestCase): - def load_result(self): - import json - testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs') - testOutput = os.path.join(testInputs, 'test2_run.json') - with open(testOutput, 'r') as f: - json = json.load(f) - return json - - def test_basic(self): - expect_lines = [ - ['.', '-0.5000', '-0.5000', '10', '5', '10', '5'], - ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'], - ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'], - ['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'], - ] - json = self.load_result() - json1 = filter_benchmark(json, "BM_Z.ro", ".") - json2 = filter_benchmark(json, "BM_O.e", ".") - output_lines_with_header = generate_difference_report(json1, json2, use_color=False) - output_lines = output_lines_with_header[2:] - print("\n") - print("\n".join(output_lines_with_header)) - self.assertEqual(len(output_lines), len(expect_lines)) - for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(' ') if x] - self.assertEqual(len(parts), 7) - self.assertEqual(parts, expect_lines[i]) - - -if __name__ == '__main__': - unittest.main() - -# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 -# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; -# kate: indent-mode python; remove-trailing-spaces modified; diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/util.py b/benchmarks/thirdparty/benchmark/tools/gbench/util.py deleted file mode 100755 index 07c2377275..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/gbench/util.py +++ /dev/null @@ -1,159 +0,0 @@ -"""util.py - General utilities for running, loading, and processing benchmarks -""" -import json -import os -import tempfile -import subprocess -import sys - -# Input file type enumeration -IT_Invalid = 0 -IT_JSON = 1 -IT_Executable = 2 - -_num_magic_bytes = 2 if sys.platform.startswith('win') else 4 -def is_executable_file(filename): - """ - Return 'True' if 'filename' names a valid file which is likely - an executable. A file is considered an executable if it starts with the - magic bytes for a EXE, Mach O, or ELF file. 
- """ - if not os.path.isfile(filename): - return False - with open(filename, mode='rb') as f: - magic_bytes = f.read(_num_magic_bytes) - if sys.platform == 'darwin': - return magic_bytes in [ - b'\xfe\xed\xfa\xce', # MH_MAGIC - b'\xce\xfa\xed\xfe', # MH_CIGAM - b'\xfe\xed\xfa\xcf', # MH_MAGIC_64 - b'\xcf\xfa\xed\xfe', # MH_CIGAM_64 - b'\xca\xfe\xba\xbe', # FAT_MAGIC - b'\xbe\xba\xfe\xca' # FAT_CIGAM - ] - elif sys.platform.startswith('win'): - return magic_bytes == b'MZ' - else: - return magic_bytes == b'\x7FELF' - - -def is_json_file(filename): - """ - Returns 'True' if 'filename' names a valid JSON output file. - 'False' otherwise. - """ - try: - with open(filename, 'r') as f: - json.load(f) - return True - except: - pass - return False - - -def classify_input_file(filename): - """ - Return a tuple (type, msg) where 'type' specifies the classified type - of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable - string represeting the error. - """ - ftype = IT_Invalid - err_msg = None - if not os.path.exists(filename): - err_msg = "'%s' does not exist" % filename - elif not os.path.isfile(filename): - err_msg = "'%s' does not name a file" % filename - elif is_executable_file(filename): - ftype = IT_Executable - elif is_json_file(filename): - ftype = IT_JSON - else: - err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename - return ftype, err_msg - - -def check_input_file(filename): - """ - Classify the file named by 'filename' and return the classification. - If the file is classified as 'IT_Invalid' print an error message and exit - the program. - """ - ftype, msg = classify_input_file(filename) - if ftype == IT_Invalid: - print("Invalid input file: %s" % msg) - sys.exit(1) - return ftype - -def find_benchmark_flag(prefix, benchmark_flags): - """ - Search the specified list of flags for a flag matching `` and - if it is found return the arg it specifies. If specified more than once the - last value is returned. If the flag is not found None is returned. - """ - assert prefix.startswith('--') and prefix.endswith('=') - result = None - for f in benchmark_flags: - if f.startswith(prefix): - result = f[len(prefix):] - return result - -def remove_benchmark_flags(prefix, benchmark_flags): - """ - Return a new list containing the specified benchmark_flags except those - with the specified prefix. - """ - assert prefix.startswith('--') and prefix.endswith('=') - return [f for f in benchmark_flags if not f.startswith(prefix)] - -def load_benchmark_results(fname): - """ - Read benchmark output from a file and return the JSON object. - REQUIRES: 'fname' names a file containing JSON benchmark output. - """ - with open(fname, 'r') as f: - return json.load(f) - - -def run_benchmark(exe_name, benchmark_flags): - """ - Run a benchmark specified by 'exe_name' with the specified - 'benchmark_flags'. The benchmark is run directly as a subprocess to preserve - real time console output. 
- RETURNS: A JSON object representing the benchmark output - """ - output_name = find_benchmark_flag('--benchmark_out=', - benchmark_flags) - is_temp_output = False - if output_name is None: - is_temp_output = True - thandle, output_name = tempfile.mkstemp() - os.close(thandle) - benchmark_flags = list(benchmark_flags) + \ - ['--benchmark_out=%s' % output_name] - - cmd = [exe_name] + benchmark_flags - print("RUNNING: %s" % ' '.join(cmd)) - exitCode = subprocess.call(cmd) - if exitCode != 0: - print('TEST FAILED...') - sys.exit(exitCode) - json_res = load_benchmark_results(output_name) - if is_temp_output: - os.unlink(output_name) - return json_res - - -def run_or_load_benchmark(filename, benchmark_flags): - """ - Get the results for a specified benchmark. If 'filename' specifies - an executable benchmark then the results are generated by running the - benchmark. Otherwise 'filename' must name a valid JSON output file, - which is loaded and the result returned. - """ - ftype = check_input_file(filename) - if ftype == IT_JSON: - return load_benchmark_results(filename) - elif ftype == IT_Executable: - return run_benchmark(filename, benchmark_flags) - else: - assert False # This branch is unreachable \ No newline at end of file diff --git a/benchmarks/thirdparty/benchmark/tools/strip_asm.py b/benchmarks/thirdparty/benchmark/tools/strip_asm.py deleted file mode 100755 index 9030550b43..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/strip_asm.py +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env python - -""" -strip_asm.py - Cleanup ASM output for the specified file -""" - -from argparse import ArgumentParser -import sys -import os -import re - -def find_used_labels(asm): - found = set() - label_re = re.compile("\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)") - for l in asm.splitlines(): - m = label_re.match(l) - if m: - found.add('.L%s' % m.group(1)) - return found - - -def normalize_labels(asm): - decls = set() - label_decl = re.compile("^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)") - for l in asm.splitlines(): - m = label_decl.match(l) - if m: - decls.add(m.group(0)) - if len(decls) == 0: - return asm - needs_dot = next(iter(decls))[0] != '.' - if not needs_dot: - return asm - for ld in decls: - asm = re.sub("(^|\s+)" + ld + "(?=:|\s)", '\\1.' + ld, asm) - return asm - - -def transform_labels(asm): - asm = normalize_labels(asm) - used_decls = find_used_labels(asm) - new_asm = '' - label_decl = re.compile("^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)") - for l in asm.splitlines(): - m = label_decl.match(l) - if not m or m.group(0) in used_decls: - new_asm += l - new_asm += '\n' - return new_asm - - -def is_identifier(tk): - if len(tk) == 0: - return False - first = tk[0] - if not first.isalpha() and first != '_': - return False - for i in range(1, len(tk)): - c = tk[i] - if not c.isalnum() and c != '_': - return False - return True - -def process_identifiers(l): - """ - process_identifiers - process all identifiers and modify them to have - consistent names across all platforms; specifically across ELF and MachO. - For example, MachO inserts an additional understore at the beginning of - names. This function removes that. 
- """ - parts = re.split(r'([a-zA-Z0-9_]+)', l) - new_line = '' - for tk in parts: - if is_identifier(tk): - if tk.startswith('__Z'): - tk = tk[1:] - elif tk.startswith('_') and len(tk) > 1 and \ - tk[1].isalpha() and tk[1] != 'Z': - tk = tk[1:] - new_line += tk - return new_line - - -def process_asm(asm): - """ - Strip the ASM of unwanted directives and lines - """ - new_contents = '' - asm = transform_labels(asm) - - # TODO: Add more things we want to remove - discard_regexes = [ - re.compile("\s+\..*$"), # directive - re.compile("\s*#(NO_APP|APP)$"), #inline ASM - re.compile("\s*#.*$"), # comment line - re.compile("\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"), #global directive - re.compile("\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"), - ] - keep_regexes = [ - - ] - fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:") - for l in asm.splitlines(): - # Remove Mach-O attribute - l = l.replace('@GOTPCREL', '') - add_line = True - for reg in discard_regexes: - if reg.match(l) is not None: - add_line = False - break - for reg in keep_regexes: - if reg.match(l) is not None: - add_line = True - break - if add_line: - if fn_label_def.match(l) and len(new_contents) != 0: - new_contents += '\n' - l = process_identifiers(l) - new_contents += l - new_contents += '\n' - return new_contents - -def main(): - parser = ArgumentParser( - description='generate a stripped assembly file') - parser.add_argument( - 'input', metavar='input', type=str, nargs=1, - help='An input assembly file') - parser.add_argument( - 'out', metavar='output', type=str, nargs=1, - help='The output file') - args, unknown_args = parser.parse_known_args() - input = args.input[0] - output = args.out[0] - if not os.path.isfile(input): - print(("ERROR: input file '%s' does not exist") % input) - sys.exit(1) - contents = None - with open(input, 'r') as f: - contents = f.read() - new_contents = process_asm(contents) - with open(output, 'w') as f: - f.write(new_contents) - - -if __name__ == '__main__': - main() - -# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 -# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; -# kate: indent-mode python; remove-trailing-spaces modified; From c092de131ddf46c6ad9ebff7bfd703477e25bec3 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Sat, 23 Jan 2021 13:59:52 +0100 Subject: [PATCH 027/113] :arrow_up: Google Benchmark 1.5.2 --- benchmarks/thirdparty/benchmark/.clang-format | 5 + .../.github/ISSUE_TEMPLATE/bug_report.md | 32 + .../.github/ISSUE_TEMPLATE/feature_request.md | 20 + .../.github/workflows/build-and-test.yml | 38 + .../benchmark/.github/workflows/pylint.yml | 26 + .../.github/workflows/test_bindings.yml | 24 + benchmarks/thirdparty/benchmark/.gitignore | 66 + .../benchmark/.travis-libcxx-setup.sh | 28 + benchmarks/thirdparty/benchmark/.travis.yml | 231 +++ .../thirdparty/benchmark/.ycm_extra_conf.py | 115 ++ benchmarks/thirdparty/benchmark/AUTHORS | 57 + benchmarks/thirdparty/benchmark/BUILD.bazel | 44 + .../thirdparty/benchmark/CMakeLists.txt | 286 +++ .../thirdparty/benchmark/CONTRIBUTING.md | 58 + benchmarks/thirdparty/benchmark/CONTRIBUTORS | 79 + benchmarks/thirdparty/benchmark/LICENSE | 202 +++ benchmarks/thirdparty/benchmark/README.md | 1319 ++++++++++++++ benchmarks/thirdparty/benchmark/WORKSPACE | 36 + benchmarks/thirdparty/benchmark/_config.yml | 1 + benchmarks/thirdparty/benchmark/appveyor.yml | 50 + .../benchmark/bindings/python/build_defs.bzl | 25 + .../python/google_benchmark/__init__.py | 156 ++ 
.../python/google_benchmark/benchmark.cc | 180 ++ .../python/google_benchmark/example.py | 136 ++ .../benchmark/bindings/python/pybind11.BUILD | 20 + .../bindings/python/python_headers.BUILD | 6 + .../bindings/python/requirements.txt | 2 + .../benchmark/cmake/AddCXXCompilerFlag.cmake | 74 + .../benchmark/cmake/CXXFeatureCheck.cmake | 69 + .../benchmark/cmake/Config.cmake.in | 1 + .../benchmark/cmake/GetGitVersion.cmake | 54 + .../benchmark/cmake/GoogleTest.cmake | 41 + .../benchmark/cmake/GoogleTest.cmake.in | 58 + .../benchmark/cmake/benchmark.pc.in | 12 + .../benchmark/cmake/gnu_posix_regex.cpp | 12 + .../benchmark/cmake/llvm-toolchain.cmake | 8 + .../benchmark/cmake/posix_regex.cpp | 14 + .../benchmark/cmake/split_list.cmake | 3 + .../thirdparty/benchmark/cmake/std_regex.cpp | 10 + .../benchmark/cmake/steady_clock.cpp | 7 + .../cmake/thread_safety_attributes.cpp | 4 + .../thirdparty/benchmark/conan/CMakeLists.txt | 7 + .../conan/test_package/CMakeLists.txt | 10 + .../benchmark/conan/test_package/conanfile.py | 19 + .../conan/test_package/test_package.cpp | 18 + benchmarks/thirdparty/benchmark/conanfile.py | 79 + .../thirdparty/benchmark/dependencies.md | 18 + .../benchmark/docs/AssemblyTests.md | 147 ++ .../thirdparty/benchmark/docs/_config.yml | 1 + .../thirdparty/benchmark/docs/releasing.md | 16 + benchmarks/thirdparty/benchmark/docs/tools.md | 203 +++ .../benchmark/include/benchmark/benchmark.h | 1601 +++++++++++++++++ benchmarks/thirdparty/benchmark/setup.py | 140 ++ .../thirdparty/benchmark/src/CMakeLists.txt | 114 ++ .../thirdparty/benchmark/src/arraysize.h | 33 + .../thirdparty/benchmark/src/benchmark.cc | 499 +++++ .../benchmark/src/benchmark_api_internal.cc | 15 + .../benchmark/src/benchmark_api_internal.h | 53 + .../benchmark/src/benchmark_main.cc | 17 + .../benchmark/src/benchmark_name.cc | 58 + .../benchmark/src/benchmark_register.cc | 515 ++++++ .../benchmark/src/benchmark_register.h | 107 ++ .../benchmark/src/benchmark_runner.cc | 362 ++++ .../benchmark/src/benchmark_runner.h | 51 + benchmarks/thirdparty/benchmark/src/check.h | 82 + .../thirdparty/benchmark/src/colorprint.cc | 188 ++ .../thirdparty/benchmark/src/colorprint.h | 33 + .../benchmark/src/commandlineflags.cc | 228 +++ .../benchmark/src/commandlineflags.h | 103 ++ .../thirdparty/benchmark/src/complexity.cc | 238 +++ .../thirdparty/benchmark/src/complexity.h | 55 + .../benchmark/src/console_reporter.cc | 177 ++ .../thirdparty/benchmark/src/counter.cc | 80 + benchmarks/thirdparty/benchmark/src/counter.h | 32 + .../thirdparty/benchmark/src/csv_reporter.cc | 154 ++ .../thirdparty/benchmark/src/cycleclock.h | 206 +++ .../benchmark/src/internal_macros.h | 94 + .../thirdparty/benchmark/src/json_reporter.cc | 255 +++ benchmarks/thirdparty/benchmark/src/log.h | 74 + benchmarks/thirdparty/benchmark/src/mutex.h | 155 ++ benchmarks/thirdparty/benchmark/src/re.h | 158 ++ .../thirdparty/benchmark/src/reporter.cc | 105 ++ benchmarks/thirdparty/benchmark/src/sleep.cc | 51 + benchmarks/thirdparty/benchmark/src/sleep.h | 15 + .../thirdparty/benchmark/src/statistics.cc | 193 ++ .../thirdparty/benchmark/src/statistics.h | 37 + .../thirdparty/benchmark/src/string_util.cc | 255 +++ .../thirdparty/benchmark/src/string_util.h | 59 + .../thirdparty/benchmark/src/sysinfo.cc | 712 ++++++++ .../thirdparty/benchmark/src/thread_manager.h | 64 + .../thirdparty/benchmark/src/thread_timer.h | 86 + benchmarks/thirdparty/benchmark/src/timers.cc | 244 +++ benchmarks/thirdparty/benchmark/src/timers.h | 48 + .../benchmark/test/AssemblyTests.cmake | 
46 + .../thirdparty/benchmark/test/CMakeLists.txt | 263 +++ .../benchmark/test/args_product_test.cc | 77 + .../thirdparty/benchmark/test/basic_test.cc | 136 ++ .../benchmark/test/benchmark_gtest.cc | 128 ++ .../benchmark/test/benchmark_name_gtest.cc | 74 + .../benchmark/test/benchmark_test.cc | 245 +++ .../test/clobber_memory_assembly_test.cc | 64 + .../benchmark/test/commandlineflags_gtest.cc | 201 +++ .../benchmark/test/complexity_test.cc | 213 +++ .../thirdparty/benchmark/test/cxx03_test.cc | 63 + .../benchmark/test/diagnostics_test.cc | 80 + .../test/display_aggregates_only_test.cc | 43 + .../test/donotoptimize_assembly_test.cc | 163 ++ .../benchmark/test/donotoptimize_test.cc | 52 + .../thirdparty/benchmark/test/filter_test.cc | 104 ++ .../thirdparty/benchmark/test/fixture_test.cc | 49 + .../benchmark/test/internal_threading_test.cc | 184 ++ .../benchmark/test/link_main_test.cc | 8 + .../thirdparty/benchmark/test/map_test.cc | 57 + .../benchmark/test/memory_manager_test.cc | 44 + .../benchmark/test/multiple_ranges_test.cc | 96 + .../thirdparty/benchmark/test/options_test.cc | 75 + .../thirdparty/benchmark/test/output_test.h | 213 +++ .../benchmark/test/output_test_helper.cc | 515 ++++++ .../benchmark/test/register_benchmark_test.cc | 184 ++ .../test/report_aggregates_only_test.cc | 39 + .../benchmark/test/reporter_output_test.cc | 747 ++++++++ .../benchmark/test/skip_with_error_test.cc | 195 ++ .../benchmark/test/state_assembly_test.cc | 68 + .../benchmark/test/statistics_gtest.cc | 28 + .../benchmark/test/string_util_gtest.cc | 153 ++ .../benchmark/test/templated_fixture_test.cc | 28 + .../test/user_counters_tabular_test.cc | 285 +++ .../benchmark/test/user_counters_test.cc | 531 ++++++ .../test/user_counters_thousands_test.cc | 173 ++ .../thirdparty/benchmark/tools/compare.py | 416 +++++ .../tools/gbench/Inputs/test1_run1.json | 119 ++ .../tools/gbench/Inputs/test1_run2.json | 119 ++ .../tools/gbench/Inputs/test2_run.json | 81 + .../tools/gbench/Inputs/test3_run0.json | 65 + .../tools/gbench/Inputs/test3_run1.json | 65 + .../benchmark/tools/gbench/__init__.py | 8 + .../benchmark/tools/gbench/report.py | 541 ++++++ .../thirdparty/benchmark/tools/gbench/util.py | 163 ++ .../benchmark/tools/requirements.txt | 1 + .../thirdparty/benchmark/tools/strip_asm.py | 151 ++ 140 files changed, 19226 insertions(+) create mode 100755 benchmarks/thirdparty/benchmark/.clang-format create mode 100755 benchmarks/thirdparty/benchmark/.github/ISSUE_TEMPLATE/bug_report.md create mode 100755 benchmarks/thirdparty/benchmark/.github/ISSUE_TEMPLATE/feature_request.md create mode 100755 benchmarks/thirdparty/benchmark/.github/workflows/build-and-test.yml create mode 100755 benchmarks/thirdparty/benchmark/.github/workflows/pylint.yml create mode 100755 benchmarks/thirdparty/benchmark/.github/workflows/test_bindings.yml create mode 100755 benchmarks/thirdparty/benchmark/.gitignore create mode 100755 benchmarks/thirdparty/benchmark/.travis-libcxx-setup.sh create mode 100755 benchmarks/thirdparty/benchmark/.travis.yml create mode 100755 benchmarks/thirdparty/benchmark/.ycm_extra_conf.py create mode 100755 benchmarks/thirdparty/benchmark/AUTHORS create mode 100755 benchmarks/thirdparty/benchmark/BUILD.bazel create mode 100755 benchmarks/thirdparty/benchmark/CMakeLists.txt create mode 100755 benchmarks/thirdparty/benchmark/CONTRIBUTING.md create mode 100755 benchmarks/thirdparty/benchmark/CONTRIBUTORS create mode 100755 benchmarks/thirdparty/benchmark/LICENSE create mode 100755 
benchmarks/thirdparty/benchmark/README.md create mode 100755 benchmarks/thirdparty/benchmark/WORKSPACE create mode 100755 benchmarks/thirdparty/benchmark/_config.yml create mode 100755 benchmarks/thirdparty/benchmark/appveyor.yml create mode 100755 benchmarks/thirdparty/benchmark/bindings/python/build_defs.bzl create mode 100755 benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/__init__.py create mode 100755 benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/benchmark.cc create mode 100755 benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/example.py create mode 100755 benchmarks/thirdparty/benchmark/bindings/python/pybind11.BUILD create mode 100755 benchmarks/thirdparty/benchmark/bindings/python/python_headers.BUILD create mode 100755 benchmarks/thirdparty/benchmark/bindings/python/requirements.txt create mode 100755 benchmarks/thirdparty/benchmark/cmake/AddCXXCompilerFlag.cmake create mode 100755 benchmarks/thirdparty/benchmark/cmake/CXXFeatureCheck.cmake create mode 100755 benchmarks/thirdparty/benchmark/cmake/Config.cmake.in create mode 100755 benchmarks/thirdparty/benchmark/cmake/GetGitVersion.cmake create mode 100755 benchmarks/thirdparty/benchmark/cmake/GoogleTest.cmake create mode 100755 benchmarks/thirdparty/benchmark/cmake/GoogleTest.cmake.in create mode 100755 benchmarks/thirdparty/benchmark/cmake/benchmark.pc.in create mode 100755 benchmarks/thirdparty/benchmark/cmake/gnu_posix_regex.cpp create mode 100755 benchmarks/thirdparty/benchmark/cmake/llvm-toolchain.cmake create mode 100755 benchmarks/thirdparty/benchmark/cmake/posix_regex.cpp create mode 100755 benchmarks/thirdparty/benchmark/cmake/split_list.cmake create mode 100755 benchmarks/thirdparty/benchmark/cmake/std_regex.cpp create mode 100755 benchmarks/thirdparty/benchmark/cmake/steady_clock.cpp create mode 100755 benchmarks/thirdparty/benchmark/cmake/thread_safety_attributes.cpp create mode 100755 benchmarks/thirdparty/benchmark/conan/CMakeLists.txt create mode 100755 benchmarks/thirdparty/benchmark/conan/test_package/CMakeLists.txt create mode 100755 benchmarks/thirdparty/benchmark/conan/test_package/conanfile.py create mode 100755 benchmarks/thirdparty/benchmark/conan/test_package/test_package.cpp create mode 100755 benchmarks/thirdparty/benchmark/conanfile.py create mode 100755 benchmarks/thirdparty/benchmark/dependencies.md create mode 100755 benchmarks/thirdparty/benchmark/docs/AssemblyTests.md create mode 100755 benchmarks/thirdparty/benchmark/docs/_config.yml create mode 100755 benchmarks/thirdparty/benchmark/docs/releasing.md create mode 100755 benchmarks/thirdparty/benchmark/docs/tools.md create mode 100755 benchmarks/thirdparty/benchmark/include/benchmark/benchmark.h create mode 100755 benchmarks/thirdparty/benchmark/setup.py create mode 100755 benchmarks/thirdparty/benchmark/src/CMakeLists.txt create mode 100755 benchmarks/thirdparty/benchmark/src/arraysize.h create mode 100755 benchmarks/thirdparty/benchmark/src/benchmark.cc create mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_api_internal.cc create mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_api_internal.h create mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_main.cc create mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_name.cc create mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_register.cc create mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_register.h create mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_runner.cc create mode 100755 
benchmarks/thirdparty/benchmark/src/benchmark_runner.h create mode 100755 benchmarks/thirdparty/benchmark/src/check.h create mode 100755 benchmarks/thirdparty/benchmark/src/colorprint.cc create mode 100755 benchmarks/thirdparty/benchmark/src/colorprint.h create mode 100755 benchmarks/thirdparty/benchmark/src/commandlineflags.cc create mode 100755 benchmarks/thirdparty/benchmark/src/commandlineflags.h create mode 100755 benchmarks/thirdparty/benchmark/src/complexity.cc create mode 100755 benchmarks/thirdparty/benchmark/src/complexity.h create mode 100755 benchmarks/thirdparty/benchmark/src/console_reporter.cc create mode 100755 benchmarks/thirdparty/benchmark/src/counter.cc create mode 100755 benchmarks/thirdparty/benchmark/src/counter.h create mode 100755 benchmarks/thirdparty/benchmark/src/csv_reporter.cc create mode 100755 benchmarks/thirdparty/benchmark/src/cycleclock.h create mode 100755 benchmarks/thirdparty/benchmark/src/internal_macros.h create mode 100755 benchmarks/thirdparty/benchmark/src/json_reporter.cc create mode 100755 benchmarks/thirdparty/benchmark/src/log.h create mode 100755 benchmarks/thirdparty/benchmark/src/mutex.h create mode 100755 benchmarks/thirdparty/benchmark/src/re.h create mode 100755 benchmarks/thirdparty/benchmark/src/reporter.cc create mode 100755 benchmarks/thirdparty/benchmark/src/sleep.cc create mode 100755 benchmarks/thirdparty/benchmark/src/sleep.h create mode 100755 benchmarks/thirdparty/benchmark/src/statistics.cc create mode 100755 benchmarks/thirdparty/benchmark/src/statistics.h create mode 100755 benchmarks/thirdparty/benchmark/src/string_util.cc create mode 100755 benchmarks/thirdparty/benchmark/src/string_util.h create mode 100755 benchmarks/thirdparty/benchmark/src/sysinfo.cc create mode 100755 benchmarks/thirdparty/benchmark/src/thread_manager.h create mode 100755 benchmarks/thirdparty/benchmark/src/thread_timer.h create mode 100755 benchmarks/thirdparty/benchmark/src/timers.cc create mode 100755 benchmarks/thirdparty/benchmark/src/timers.h create mode 100755 benchmarks/thirdparty/benchmark/test/AssemblyTests.cmake create mode 100755 benchmarks/thirdparty/benchmark/test/CMakeLists.txt create mode 100755 benchmarks/thirdparty/benchmark/test/args_product_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/basic_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/benchmark_gtest.cc create mode 100755 benchmarks/thirdparty/benchmark/test/benchmark_name_gtest.cc create mode 100755 benchmarks/thirdparty/benchmark/test/benchmark_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/clobber_memory_assembly_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/commandlineflags_gtest.cc create mode 100755 benchmarks/thirdparty/benchmark/test/complexity_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/cxx03_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/diagnostics_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/display_aggregates_only_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/donotoptimize_assembly_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/donotoptimize_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/filter_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/fixture_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/internal_threading_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/link_main_test.cc create mode 100755 
benchmarks/thirdparty/benchmark/test/map_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/memory_manager_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/multiple_ranges_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/options_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/output_test.h create mode 100755 benchmarks/thirdparty/benchmark/test/output_test_helper.cc create mode 100755 benchmarks/thirdparty/benchmark/test/register_benchmark_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/report_aggregates_only_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/reporter_output_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/skip_with_error_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/state_assembly_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/statistics_gtest.cc create mode 100755 benchmarks/thirdparty/benchmark/test/string_util_gtest.cc create mode 100755 benchmarks/thirdparty/benchmark/test/templated_fixture_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/user_counters_tabular_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/user_counters_test.cc create mode 100755 benchmarks/thirdparty/benchmark/test/user_counters_thousands_test.cc create mode 100755 benchmarks/thirdparty/benchmark/tools/compare.py create mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run1.json create mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run2.json create mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test2_run.json create mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test3_run0.json create mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test3_run1.json create mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/__init__.py create mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/report.py create mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/util.py create mode 100755 benchmarks/thirdparty/benchmark/tools/requirements.txt create mode 100755 benchmarks/thirdparty/benchmark/tools/strip_asm.py diff --git a/benchmarks/thirdparty/benchmark/.clang-format b/benchmarks/thirdparty/benchmark/.clang-format new file mode 100755 index 0000000000..e7d00feaa0 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/.clang-format @@ -0,0 +1,5 @@ +--- +Language: Cpp +BasedOnStyle: Google +PointerAlignment: Left +... diff --git a/benchmarks/thirdparty/benchmark/.github/ISSUE_TEMPLATE/bug_report.md b/benchmarks/thirdparty/benchmark/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100755 index 0000000000..6c2ced9b2e --- /dev/null +++ b/benchmarks/thirdparty/benchmark/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,32 @@ +--- +name: Bug report +about: Create a report to help us improve +title: "[BUG]" +labels: '' +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**System** +Which OS, compiler, and compiler version are you using: + - OS: + - Compiler and version: + +**To reproduce** +Steps to reproduce the behavior: +1. sync to commit ... +2. cmake/bazel... +3. make ... +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Additional context** +Add any other context about the problem here. 
diff --git a/benchmarks/thirdparty/benchmark/.github/ISSUE_TEMPLATE/feature_request.md b/benchmarks/thirdparty/benchmark/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100755 index 0000000000..9e8ab6a673 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: "[FR]" +labels: '' +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/benchmarks/thirdparty/benchmark/.github/workflows/build-and-test.yml b/benchmarks/thirdparty/benchmark/.github/workflows/build-and-test.yml new file mode 100755 index 0000000000..f0f0626d74 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/.github/workflows/build-and-test.yml @@ -0,0 +1,38 @@ +name: build-and-test + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + job: + # TODO(dominic): Extend this to include compiler and set through env: CC/CXX. + name: ${{ matrix.os }}.${{ matrix.build_type }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, ubuntu-16.04, ubuntu-20.04, macos-latest, windows-latest] + build_type: ['Release', 'Debug'] + steps: + - uses: actions/checkout@v2 + + - name: create build environment + run: cmake -E make_directory ${{ runner.workspace }}/_build + + - name: configure cmake + shell: bash + working-directory: ${{ runner.workspace }}/_build + run: cmake -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} + + - name: build + shell: bash + working-directory: ${{ runner.workspace }}/_build + run: cmake --build . --config ${{ matrix.build_type }} + + - name: test + shell: bash + working-directory: ${{ runner.workspace }}/_build + run: ctest -C ${{ matrix.build_type }} diff --git a/benchmarks/thirdparty/benchmark/.github/workflows/pylint.yml b/benchmarks/thirdparty/benchmark/.github/workflows/pylint.yml new file mode 100755 index 0000000000..c8696749f3 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/.github/workflows/pylint.yml @@ -0,0 +1,26 @@ +name: pylint + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + pylint: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.8 + uses: actions/setup-python@v1 + with: + python-version: 3.8 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install pylint pylint-exit conan + - name: Run pylint + run: | + pylint `find . -name '*.py'|xargs` || pylint-exit $? 
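
For context on the '|| pylint-exit $?' step above: pylint encodes message categories as bits in its exit status (per pylint's documentation: 1 fatal, 2 error, 4 warning, 8 refactor, 16 convention, 32 usage error), so without a wrapper any convention message would fail the job. Below is a rough Python sketch of the kind of decoding pylint-exit performs; the bit values follow pylint's docs, while the pass/fail policy shown is an illustrative assumption rather than pylint-exit's exact behavior, and it assumes pylint is installed:

    import subprocess
    import sys

    # Pylint exit status bits, per the pylint documentation.
    FATAL, ERROR, USAGE_ERROR = 1, 2, 32

    # 'gbench' is just an example lint target.
    status = subprocess.call(['pylint', 'gbench'])
    if status & (FATAL | ERROR | USAGE_ERROR):
        sys.exit(1)  # only hard failures should break CI
    sys.exit(0)      # warning/refactor/convention bits are tolerated
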
diff --git a/benchmarks/thirdparty/benchmark/.github/workflows/test_bindings.yml b/benchmarks/thirdparty/benchmark/.github/workflows/test_bindings.yml new file mode 100755 index 0000000000..273d7f93ee --- /dev/null +++ b/benchmarks/thirdparty/benchmark/.github/workflows/test_bindings.yml @@ -0,0 +1,24 @@ +name: test-bindings + +on: + push: + branches: [master] + pull_request: + branches: [master] + +jobs: + python_bindings: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v1 + with: + python-version: 3.8 + - name: Install benchmark + run: + python setup.py install + - name: Run example bindings + run: + python bindings/python/google_benchmark/example.py diff --git a/benchmarks/thirdparty/benchmark/.gitignore b/benchmarks/thirdparty/benchmark/.gitignore new file mode 100755 index 0000000000..be55d774e2 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/.gitignore @@ -0,0 +1,66 @@ +*.a +*.so +*.so.?* +*.dll +*.exe +*.dylib +*.cmake +!/cmake/*.cmake +!/test/AssemblyTests.cmake +*~ +*.swp +*.pyc +__pycache__ + +# lcov +*.lcov +/lcov + +# cmake files. +/Testing +CMakeCache.txt +CMakeFiles/ +cmake_install.cmake + +# makefiles. +Makefile + +# in-source build. +bin/ +lib/ +/test/*_test + +# exuberant ctags. +tags + +# YouCompleteMe configuration. +.ycm_extra_conf.pyc + +# ninja generated files. +.ninja_deps +.ninja_log +build.ninja +install_manifest.txt +rules.ninja + +# bazel output symlinks. +bazel-* + +# out-of-source build top-level folders. +build/ +_build/ +build*/ + +# in-source dependencies +/googletest/ + +# Visual Studio 2015/2017 cache/options directory +.vs/ +CMakeSettings.json + +# Visual Studio Code cache/options directory +.vscode/ + +# Python build stuff +dist/ +*.egg-info* diff --git a/benchmarks/thirdparty/benchmark/.travis-libcxx-setup.sh b/benchmarks/thirdparty/benchmark/.travis-libcxx-setup.sh new file mode 100755 index 0000000000..a591743c6a --- /dev/null +++ b/benchmarks/thirdparty/benchmark/.travis-libcxx-setup.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Install a newer CMake version +curl -sSL https://cmake.org/files/v3.6/cmake-3.6.1-Linux-x86_64.sh -o install-cmake.sh +chmod +x install-cmake.sh +sudo ./install-cmake.sh --prefix=/usr/local --skip-license + +# Checkout LLVM sources +git clone --depth=1 https://github.com/llvm-mirror/llvm.git llvm-source +git clone --depth=1 https://github.com/llvm-mirror/libcxx.git llvm-source/projects/libcxx +git clone --depth=1 https://github.com/llvm-mirror/libcxxabi.git llvm-source/projects/libcxxabi + +# Setup libc++ options +if [ -z "$BUILD_32_BITS" ]; then + export BUILD_32_BITS=OFF && echo disabling 32 bit build +fi + +# Build and install libc++ (Use unstable ABI for better sanitizer coverage) +mkdir llvm-build && cd llvm-build +cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} \ + -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr \ + -DLIBCXX_ABI_UNSTABLE=ON \ + -DLLVM_USE_SANITIZER=${LIBCXX_SANITIZER} \ + -DLLVM_BUILD_32_BITS=${BUILD_32_BITS} \ + ../llvm-source +make cxx -j2 +sudo make install-cxxabi install-cxx +cd ../ diff --git a/benchmarks/thirdparty/benchmark/.travis.yml b/benchmarks/thirdparty/benchmark/.travis.yml new file mode 100755 index 0000000000..36e343dbfe --- /dev/null +++ b/benchmarks/thirdparty/benchmark/.travis.yml @@ -0,0 +1,231 @@ +sudo: required +dist: trusty +language: cpp + +matrix: + include: + - compiler: gcc + addons: + apt: + packages: + - lcov + env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Coverage 
+ - compiler: gcc + env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug + - compiler: gcc + env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release + - compiler: gcc + addons: + apt: + packages: + - g++-multilib + - libc6:i386 + env: + - COMPILER=g++ + - C_COMPILER=gcc + - BUILD_TYPE=Debug + - BUILD_32_BITS=ON + - EXTRA_FLAGS="-m32" + - compiler: gcc + addons: + apt: + packages: + - g++-multilib + - libc6:i386 + env: + - COMPILER=g++ + - C_COMPILER=gcc + - BUILD_TYPE=Release + - BUILD_32_BITS=ON + - EXTRA_FLAGS="-m32" + - compiler: gcc + env: + - INSTALL_GCC6_FROM_PPA=1 + - COMPILER=g++-6 C_COMPILER=gcc-6 BUILD_TYPE=Debug + - ENABLE_SANITIZER=1 + - EXTRA_FLAGS="-fno-omit-frame-pointer -g -O2 -fsanitize=undefined,address -fuse-ld=gold" + - compiler: clang + env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Debug + - compiler: clang + env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Release + # Clang w/ libc++ + - compiler: clang + dist: xenial + addons: + apt: + packages: + clang-3.8 + env: + - INSTALL_GCC6_FROM_PPA=1 + - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug + - LIBCXX_BUILD=1 + - EXTRA_CXX_FLAGS="-stdlib=libc++" + - compiler: clang + dist: xenial + addons: + apt: + packages: + clang-3.8 + env: + - INSTALL_GCC6_FROM_PPA=1 + - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release + - LIBCXX_BUILD=1 + - EXTRA_CXX_FLAGS="-stdlib=libc++" + # Clang w/ 32bit libc++ + - compiler: clang + dist: xenial + addons: + apt: + packages: + - clang-3.8 + - g++-multilib + - libc6:i386 + env: + - INSTALL_GCC6_FROM_PPA=1 + - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug + - LIBCXX_BUILD=1 + - BUILD_32_BITS=ON + - EXTRA_FLAGS="-m32" + - EXTRA_CXX_FLAGS="-stdlib=libc++" + # Clang w/ 32bit libc++ + - compiler: clang + dist: xenial + addons: + apt: + packages: + - clang-3.8 + - g++-multilib + - libc6:i386 + env: + - INSTALL_GCC6_FROM_PPA=1 + - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release + - LIBCXX_BUILD=1 + - BUILD_32_BITS=ON + - EXTRA_FLAGS="-m32" + - EXTRA_CXX_FLAGS="-stdlib=libc++" + # Clang w/ libc++, ASAN, UBSAN + - compiler: clang + dist: xenial + addons: + apt: + packages: + clang-3.8 + env: + - INSTALL_GCC6_FROM_PPA=1 + - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug + - LIBCXX_BUILD=1 LIBCXX_SANITIZER="Undefined;Address" + - ENABLE_SANITIZER=1 + - EXTRA_FLAGS="-g -O2 -fno-omit-frame-pointer -fsanitize=undefined,address -fno-sanitize-recover=all" + - EXTRA_CXX_FLAGS="-stdlib=libc++" + - UBSAN_OPTIONS=print_stacktrace=1 + # Clang w/ libc++ and MSAN + - compiler: clang + dist: xenial + addons: + apt: + packages: + clang-3.8 + env: + - INSTALL_GCC6_FROM_PPA=1 + - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug + - LIBCXX_BUILD=1 LIBCXX_SANITIZER=MemoryWithOrigins + - ENABLE_SANITIZER=1 + - EXTRA_FLAGS="-g -O2 -fno-omit-frame-pointer -fsanitize=memory -fsanitize-memory-track-origins" + - EXTRA_CXX_FLAGS="-stdlib=libc++" + # Clang w/ libc++ and MSAN + - compiler: clang + dist: xenial + addons: + apt: + packages: + clang-3.8 + env: + - INSTALL_GCC6_FROM_PPA=1 + - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=RelWithDebInfo + - LIBCXX_BUILD=1 LIBCXX_SANITIZER=Thread + - ENABLE_SANITIZER=1 + - EXTRA_FLAGS="-g -O2 -fno-omit-frame-pointer -fsanitize=thread -fno-sanitize-recover=all" + - EXTRA_CXX_FLAGS="-stdlib=libc++" + - os: osx + osx_image: xcode8.3 + compiler: clang + env: + - COMPILER=clang++ BUILD_TYPE=Debug + - os: osx + osx_image: xcode8.3 + compiler: clang + env: + - COMPILER=clang++ BUILD_TYPE=Release + - os: osx + 
osx_image: xcode8.3 + compiler: clang + env: + - COMPILER=clang++ + - BUILD_TYPE=Release + - BUILD_32_BITS=ON + - EXTRA_FLAGS="-m32" + - os: osx + osx_image: xcode9.4 + compiler: gcc + env: + - COMPILER=g++-7 C_COMPILER=gcc-7 BUILD_TYPE=Debug + +before_script: + - if [ -n "${LIBCXX_BUILD}" ]; then + source .travis-libcxx-setup.sh; + fi + - if [ -n "${ENABLE_SANITIZER}" ]; then + export EXTRA_OPTIONS="-DBENCHMARK_ENABLE_ASSEMBLY_TESTS=OFF"; + else + export EXTRA_OPTIONS=""; + fi + - mkdir -p build && cd build + +before_install: + - if [ -z "$BUILD_32_BITS" ]; then + export BUILD_32_BITS=OFF && echo disabling 32 bit build; + fi + - if [ -n "${INSTALL_GCC6_FROM_PPA}" ]; then + sudo add-apt-repository -y "ppa:ubuntu-toolchain-r/test"; + sudo apt-get update --option Acquire::Retries=100 --option Acquire::http::Timeout="60"; + fi + +install: + - if [ -n "${INSTALL_GCC6_FROM_PPA}" ]; then + travis_wait sudo -E apt-get -yq --no-install-suggests --no-install-recommends install g++-6; + fi + - if [ "${TRAVIS_OS_NAME}" == "linux" -a "${BUILD_32_BITS}" == "OFF" ]; then + travis_wait sudo -E apt-get -y --no-install-suggests --no-install-recommends install llvm-3.9-tools; + sudo cp /usr/lib/llvm-3.9/bin/FileCheck /usr/local/bin/; + fi + - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then + PATH=~/.local/bin:${PATH}; + pip install --user --upgrade pip; + travis_wait pip install --user cpp-coveralls; + fi + - if [ "${C_COMPILER}" == "gcc-7" -a "${TRAVIS_OS_NAME}" == "osx" ]; then + rm -f /usr/local/include/c++; + brew update; + travis_wait brew install gcc@7; + fi + - if [ "${TRAVIS_OS_NAME}" == "linux" ]; then + sudo apt-get update -qq; + sudo apt-get install -qq unzip cmake3; + wget https://github.com/bazelbuild/bazel/releases/download/3.2.0/bazel-3.2.0-installer-linux-x86_64.sh --output-document bazel-installer.sh; + travis_wait sudo bash bazel-installer.sh; + fi + - if [ "${TRAVIS_OS_NAME}" == "osx" ]; then + curl -L -o bazel-installer.sh https://github.com/bazelbuild/bazel/releases/download/3.2.0/bazel-3.2.0-installer-darwin-x86_64.sh; + travis_wait sudo bash bazel-installer.sh; + fi + +script: + - cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_C_FLAGS="${EXTRA_FLAGS}" -DCMAKE_CXX_FLAGS="${EXTRA_FLAGS} ${EXTRA_CXX_FLAGS}" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON -DBENCHMARK_BUILD_32_BITS=${BUILD_32_BITS} ${EXTRA_OPTIONS} .. + - make + - ctest -C ${BUILD_TYPE} --output-on-failure + - bazel test -c dbg --define google_benchmark.have_regex=posix --announce_rc --verbose_failures --test_output=errors --keep_going //test/... + +after_success: + - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then + coveralls --include src --include include --gcov-options '\-lp' --root .. --build-root .; + fi diff --git a/benchmarks/thirdparty/benchmark/.ycm_extra_conf.py b/benchmarks/thirdparty/benchmark/.ycm_extra_conf.py new file mode 100755 index 0000000000..5649ddcc74 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/.ycm_extra_conf.py @@ -0,0 +1,115 @@ +import os +import ycm_core + +# These are the compilation flags that will be used in case there's no +# compilation database set (by default, one is not set). +# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR. 
+flags = [ +'-Wall', +'-Werror', +'-pedantic-errors', +'-std=c++0x', +'-fno-strict-aliasing', +'-O3', +'-DNDEBUG', +# ...and the same thing goes for the magic -x option which specifies the +# language that the files to be compiled are written in. This is mostly +# relevant for c++ headers. +# For a C project, you would set this to 'c' instead of 'c++'. +'-x', 'c++', +'-I', 'include', +'-isystem', '/usr/include', +'-isystem', '/usr/local/include', +] + + +# Set this to the absolute path to the folder (NOT the file!) containing the +# compile_commands.json file to use that instead of 'flags'. See here for +# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html +# +# Most projects will NOT need to set this to anything; you can just change the +# 'flags' list of compilation flags. Notice that YCM itself uses that approach. +compilation_database_folder = '' + +if os.path.exists( compilation_database_folder ): + database = ycm_core.CompilationDatabase( compilation_database_folder ) +else: + database = None + +SOURCE_EXTENSIONS = [ '.cc' ] + +def DirectoryOfThisScript(): + return os.path.dirname( os.path.abspath( __file__ ) ) + + +def MakeRelativePathsInFlagsAbsolute( flags, working_directory ): + if not working_directory: + return list( flags ) + new_flags = [] + make_next_absolute = False + path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ] + for flag in flags: + new_flag = flag + + if make_next_absolute: + make_next_absolute = False + if not flag.startswith( '/' ): + new_flag = os.path.join( working_directory, flag ) + + for path_flag in path_flags: + if flag == path_flag: + make_next_absolute = True + break + + if flag.startswith( path_flag ): + path = flag[ len( path_flag ): ] + new_flag = path_flag + os.path.join( working_directory, path ) + break + + if new_flag: + new_flags.append( new_flag ) + return new_flags + + +def IsHeaderFile( filename ): + extension = os.path.splitext( filename )[ 1 ] + return extension in [ '.h', '.hxx', '.hpp', '.hh' ] + + +def GetCompilationInfoForFile( filename ): + # The compilation_commands.json file generated by CMake does not have entries + # for header files. So we do our best by asking the db for flags for a + # corresponding source file, if any. If one exists, the flags for that file + # should be good enough. 
+ if IsHeaderFile( filename ): + basename = os.path.splitext( filename )[ 0 ] + for extension in SOURCE_EXTENSIONS: + replacement_file = basename + extension + if os.path.exists( replacement_file ): + compilation_info = database.GetCompilationInfoForFile( + replacement_file ) + if compilation_info.compiler_flags_: + return compilation_info + return None + return database.GetCompilationInfoForFile( filename ) + + +def FlagsForFile( filename, **kwargs ): + if database: + # Bear in mind that compilation_info.compiler_flags_ does NOT return a + # python list, but a "list-like" StringVec object + compilation_info = GetCompilationInfoForFile( filename ) + if not compilation_info: + return None + + final_flags = MakeRelativePathsInFlagsAbsolute( + compilation_info.compiler_flags_, + compilation_info.compiler_working_dir_ ) + else: + relative_to = DirectoryOfThisScript() + final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to ) + + return { + 'flags': final_flags, + 'do_cache': True + } diff --git a/benchmarks/thirdparty/benchmark/AUTHORS b/benchmarks/thirdparty/benchmark/AUTHORS new file mode 100755 index 0000000000..e353b53bf3 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/AUTHORS @@ -0,0 +1,57 @@ +# This is the official list of benchmark authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. +# +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. +# +# Please keep the list sorted. + +Albert Pretorius +Alex Steele +Andriy Berestovskyy +Arne Beer +Carto +Christian Wassermann +Christopher Seymour +Colin Braley +Daniel Harvey +David Coeurjolly +Deniz Evrenci +Dirac Research +Dominik Czarnota +Eric Backus +Eric Fiselier +Eugene Zhuk +Evgeny Safronov +Federico Ficarelli +Felix Homann +GergΕ‘ SzitΓ‘r +Google Inc. +International Business Machines Corporation +Ismael Jimenez Martinez +Jern-Kuan Leong +JianXiong Zhou +Joao Paulo Magalhaes +Jordan Williams +Jussi Knuuttila +Kaito Udagawa +Kishan Kumar +Lei Xu +Matt Clarkson +Maxim Vafin +MongoDB Inc. +Nick Hutchinson +Oleksandr Sochka +Ori Livneh +Paul Redmond +Radoslav Yovchev +Roman Lebedev +Sayan Bhattacharjee +Shuo Chen +Steinar H. Gunderson +Stripe, Inc. 
+Yixuan Qiu +Yusuke Suzuki +Zbigniew Skowron diff --git a/benchmarks/thirdparty/benchmark/BUILD.bazel b/benchmarks/thirdparty/benchmark/BUILD.bazel new file mode 100755 index 0000000000..eb35b62730 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/BUILD.bazel @@ -0,0 +1,44 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + +licenses(["notice"]) + +config_setting( + name = "windows", + values = { + "cpu": "x64_windows", + }, + visibility = [":__subpackages__"], +) + +cc_library( + name = "benchmark", + srcs = glob( + [ + "src/*.cc", + "src/*.h", + ], + exclude = ["src/benchmark_main.cc"], + ), + hdrs = ["include/benchmark/benchmark.h"], + linkopts = select({ + ":windows": ["-DEFAULTLIB:shlwapi.lib"], + "//conditions:default": ["-pthread"], + }), + strip_include_prefix = "include", + visibility = ["//visibility:public"], +) + +cc_library( + name = "benchmark_main", + srcs = ["src/benchmark_main.cc"], + hdrs = ["include/benchmark/benchmark.h"], + strip_include_prefix = "include", + visibility = ["//visibility:public"], + deps = [":benchmark"], +) + +cc_library( + name = "benchmark_internal_headers", + hdrs = glob(["src/*.h"]), + visibility = ["//test:__pkg__"], +) diff --git a/benchmarks/thirdparty/benchmark/CMakeLists.txt b/benchmarks/thirdparty/benchmark/CMakeLists.txt new file mode 100755 index 0000000000..a157666148 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/CMakeLists.txt @@ -0,0 +1,286 @@ +cmake_minimum_required (VERSION 3.5.1) + +foreach(p + CMP0048 # OK to clear PROJECT_VERSION on project() + CMP0054 # CMake 3.1 + CMP0056 # export EXE_LINKER_FLAGS to try_run + CMP0057 # Support no if() IN_LIST operator + CMP0063 # Honor visibility properties for all targets + CMP0077 # Allow option() overrides in importing projects + ) + if(POLICY ${p}) + cmake_policy(SET ${p} NEW) + endif() +endforeach() + +project (benchmark CXX) + +option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON) +option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON) +option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF) +option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF) +if(NOT MSVC) + option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF) +else() + set(BENCHMARK_BUILD_32_BITS OFF CACHE BOOL "Build a 32 bit version of the library - unsupported when using MSVC)" FORCE) +endif() +option(BENCHMARK_ENABLE_INSTALL "Enable installation of benchmark. (Projects embedding benchmark may want to turn this OFF.)" ON) + +# Allow unmet dependencies to be met using CMake's ExternalProject mechanics, which +# may require downloading the source code. +option(BENCHMARK_DOWNLOAD_DEPENDENCIES "Allow the downloading and in-tree building of unmet dependencies" OFF) + +# This option can be used to disable building and running unit tests which depend on gtest +# in cases where it is not possible to build or find a valid version of gtest. +option(BENCHMARK_ENABLE_GTEST_TESTS "Enable building the unit tests which depend on gtest" ON) + +set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON) +set(ENABLE_ASSEMBLY_TESTS_DEFAULT OFF) +function(should_enable_assembly_tests) + if(CMAKE_BUILD_TYPE) + string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER) + if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage") + # FIXME: The --coverage flag needs to be removed when building assembly + # tests for this to work. 
+ return() + endif() + endif() + if (MSVC) + return() + elseif(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64") + return() + elseif(NOT CMAKE_SIZEOF_VOID_P EQUAL 8) + # FIXME: Make these work on 32 bit builds + return() + elseif(BENCHMARK_BUILD_32_BITS) + # FIXME: Make these work on 32 bit builds + return() + endif() + find_program(LLVM_FILECHECK_EXE FileCheck) + if (LLVM_FILECHECK_EXE) + set(LLVM_FILECHECK_EXE "${LLVM_FILECHECK_EXE}" CACHE PATH "llvm filecheck" FORCE) + message(STATUS "LLVM FileCheck Found: ${LLVM_FILECHECK_EXE}") + else() + message(STATUS "Failed to find LLVM FileCheck") + return() + endif() + set(ENABLE_ASSEMBLY_TESTS_DEFAULT ON PARENT_SCOPE) +endfunction() +should_enable_assembly_tests() + +# This option disables the building and running of the assembly verification tests +option(BENCHMARK_ENABLE_ASSEMBLY_TESTS "Enable building and running the assembly tests" + ${ENABLE_ASSEMBLY_TESTS_DEFAULT}) + +# Make sure we can import out CMake functions +list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules") +list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") + + +# Read the git tags to determine the project version +include(GetGitVersion) +get_git_version(GIT_VERSION) + +# Tell the user what versions we are using +string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]+" VERSION ${GIT_VERSION}) +message(STATUS "Version: ${VERSION}") + +# The version of the libraries +set(GENERIC_LIB_VERSION ${VERSION}) +string(SUBSTRING ${VERSION} 0 1 GENERIC_LIB_SOVERSION) + +# Import our CMake modules +include(CheckCXXCompilerFlag) +include(AddCXXCompilerFlag) +include(CXXFeatureCheck) + +if (BENCHMARK_BUILD_32_BITS) + add_required_cxx_compiler_flag(-m32) +endif() + +if (MSVC) + # Turn compiler warnings up to 11 + string(REGEX REPLACE "[-/]W[1-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4") + add_definitions(-D_CRT_SECURE_NO_WARNINGS) + + if (NOT BENCHMARK_ENABLE_EXCEPTIONS) + add_cxx_compiler_flag(-EHs-) + add_cxx_compiler_flag(-EHa-) + add_definitions(-D_HAS_EXCEPTIONS=0) + endif() + # Link time optimisation + if (BENCHMARK_ENABLE_LTO) + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GL") + set(CMAKE_STATIC_LINKER_FLAGS_RELEASE "${CMAKE_STATIC_LINKER_FLAGS_RELEASE} /LTCG") + set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG") + set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG") + + set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /GL") + string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO}") + set(CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO} /LTCG") + string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO}") + set(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO} /LTCG") + string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO}") + set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} /LTCG") + + set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /GL") + set(CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL "${CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL} /LTCG") + set(CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL "${CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL} /LTCG") + 
set(CMAKE_EXE_LINKER_FLAGS_MINSIZEREL "${CMAKE_EXE_LINKER_FLAGS_MINSIZEREL} /LTCG") + endif() +else() + # Try and enable C++11. Don't use C++14 because it doesn't work in some + # configurations. + add_cxx_compiler_flag(-std=c++11) + if (NOT HAVE_CXX_FLAG_STD_CXX11) + add_cxx_compiler_flag(-std=c++0x) + endif() + + # Turn compiler warnings up to 11 + add_cxx_compiler_flag(-Wall) + add_cxx_compiler_flag(-Wextra) + add_cxx_compiler_flag(-Wshadow) + add_cxx_compiler_flag(-Werror RELEASE) + add_cxx_compiler_flag(-Werror RELWITHDEBINFO) + add_cxx_compiler_flag(-Werror MINSIZEREL) + # Disabled until googletest (gmock) stops emitting variadic macro warnings + #add_cxx_compiler_flag(-pedantic) + #add_cxx_compiler_flag(-pedantic-errors) + add_cxx_compiler_flag(-Wshorten-64-to-32) + add_cxx_compiler_flag(-fstrict-aliasing) + # Disable warnings regarding deprecated parts of the library while building + # and testing those parts of the library. + add_cxx_compiler_flag(-Wno-deprecated-declarations) + if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel") + # Intel silently ignores '-Wno-deprecated-declarations', + # warning no. 1786 must be explicitly disabled. + # See #631 for rationale. + add_cxx_compiler_flag(-wd1786) + endif() + # Disable deprecation warnings for release builds (when -Werror is enabled). + add_cxx_compiler_flag(-Wno-deprecated RELEASE) + add_cxx_compiler_flag(-Wno-deprecated RELWITHDEBINFO) + add_cxx_compiler_flag(-Wno-deprecated MINSIZEREL) + if (NOT BENCHMARK_ENABLE_EXCEPTIONS) + add_cxx_compiler_flag(-fno-exceptions) + endif() + + if (HAVE_CXX_FLAG_FSTRICT_ALIASING) + if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "Intel") #ICC17u2: Many false positives for Wstrict-aliasing + add_cxx_compiler_flag(-Wstrict-aliasing) + endif() + endif() + # ICC17u2: overloaded virtual function "benchmark::Fixture::SetUp" is only partially overridden + # (because of deprecated overload) + add_cxx_compiler_flag(-wd654) + add_cxx_compiler_flag(-Wthread-safety) + if (HAVE_CXX_FLAG_WTHREAD_SAFETY) + cxx_feature_check(THREAD_SAFETY_ATTRIBUTES) + endif() + + # On most UNIX like platforms g++ and clang++ define _GNU_SOURCE as a + # predefined macro, which turns on all of the wonderful libc extensions. + # However g++ doesn't do this in Cygwin so we have to define it ourselfs + # since we depend on GNU/POSIX/BSD extensions. + if (CYGWIN) + add_definitions(-D_GNU_SOURCE=1) + endif() + + if (QNXNTO) + add_definitions(-D_QNX_SOURCE) + endif() + + # Link time optimisation + if (BENCHMARK_ENABLE_LTO) + add_cxx_compiler_flag(-flto) + if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") + find_program(GCC_AR gcc-ar) + if (GCC_AR) + set(CMAKE_AR ${GCC_AR}) + endif() + find_program(GCC_RANLIB gcc-ranlib) + if (GCC_RANLIB) + set(CMAKE_RANLIB ${GCC_RANLIB}) + endif() + elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") + include(llvm-toolchain) + endif() + endif() + + # Coverage build type + set(BENCHMARK_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG}" + CACHE STRING "Flags used by the C++ compiler during coverage builds." + FORCE) + set(BENCHMARK_EXE_LINKER_FLAGS_COVERAGE "${CMAKE_EXE_LINKER_FLAGS_DEBUG}" + CACHE STRING "Flags used for linking binaries during coverage builds." + FORCE) + set(BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}" + CACHE STRING "Flags used by the shared libraries linker during coverage builds." 
+ FORCE) + mark_as_advanced( + BENCHMARK_CXX_FLAGS_COVERAGE + BENCHMARK_EXE_LINKER_FLAGS_COVERAGE + BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE) + set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE}" CACHE STRING + "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Coverage.") + add_cxx_compiler_flag(--coverage COVERAGE) +endif() + +if (BENCHMARK_USE_LIBCXX) + if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") + add_cxx_compiler_flag(-stdlib=libc++) + elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" OR + "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel") + add_cxx_compiler_flag(-nostdinc++) + message(WARNING "libc++ header path must be manually specified using CMAKE_CXX_FLAGS") + # Adding -nodefaultlibs directly to CMAKE__LINKER_FLAGS will break + # configuration checks such as 'find_package(Threads)' + list(APPEND BENCHMARK_CXX_LINKER_FLAGS -nodefaultlibs) + # -lc++ cannot be added directly to CMAKE__LINKER_FLAGS because + # linker flags appear before all linker inputs and -lc++ must appear after. + list(APPEND BENCHMARK_CXX_LIBRARIES c++) + else() + message(FATAL_ERROR "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported for compiler") + endif() +endif(BENCHMARK_USE_LIBCXX) + +set(EXTRA_CXX_FLAGS "") +if (WIN32 AND "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") + # Clang on Windows fails to compile the regex feature check under C++11 + set(EXTRA_CXX_FLAGS "-DCMAKE_CXX_STANDARD=14") +endif() + +# C++ feature checks +# Determine the correct regular expression engine to use +cxx_feature_check(STD_REGEX ${EXTRA_CXX_FLAGS}) +cxx_feature_check(GNU_POSIX_REGEX ${EXTRA_CXX_FLAGS}) +cxx_feature_check(POSIX_REGEX ${EXTRA_CXX_FLAGS}) +if(NOT HAVE_STD_REGEX AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX) + message(FATAL_ERROR "Failed to determine the source files for the regular expression backend") +endif() +if (NOT BENCHMARK_ENABLE_EXCEPTIONS AND HAVE_STD_REGEX + AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX) + message(WARNING "Using std::regex with exceptions disabled is not fully supported") +endif() + +cxx_feature_check(STEADY_CLOCK) +# Ensure we have pthreads +set(THREADS_PREFER_PTHREAD_FLAG ON) +find_package(Threads REQUIRED) + +# Set up directories +include_directories(${PROJECT_SOURCE_DIR}/include) + +# Build the targets +add_subdirectory(src) + +if (BENCHMARK_ENABLE_TESTING) + enable_testing() + if (BENCHMARK_ENABLE_GTEST_TESTS AND + NOT (TARGET gtest AND TARGET gtest_main AND + TARGET gmock AND TARGET gmock_main)) + include(GoogleTest) + endif() + add_subdirectory(test) +endif() diff --git a/benchmarks/thirdparty/benchmark/CONTRIBUTING.md b/benchmarks/thirdparty/benchmark/CONTRIBUTING.md new file mode 100755 index 0000000000..43de4c9d47 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/CONTRIBUTING.md @@ -0,0 +1,58 @@ +# How to contribute # + +We'd love to accept your patches and contributions to this project. There are +a just a few small guidelines you need to follow. + + +## Contributor License Agreement ## + +Contributions to any Google project must be accompanied by a Contributor +License Agreement. This is not a copyright **assignment**, it simply gives +Google permission to use and redistribute your contributions as part of the +project. + + * If you are an individual writing original source code and you're sure you + own the intellectual property, then you'll need to sign an [individual + CLA][]. + + * If you work for a company that wants to allow you to contribute your work, + then you'll need to sign a [corporate CLA][]. 
+
+You generally only need to submit a CLA once, so if you've already submitted
+one (even if it was for a different project), you probably don't need to do it
+again.
+
+[individual CLA]: https://developers.google.com/open-source/cla/individual
+[corporate CLA]: https://developers.google.com/open-source/cla/corporate
+
+Once your CLA is submitted (or if you already submitted one for
+another Google project), make a commit adding yourself to the
+[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
+of your first [pull request][].
+
+[AUTHORS]: AUTHORS
+[CONTRIBUTORS]: CONTRIBUTORS
+
+
+## Submitting a patch ##
+
+ 1. It's generally best to start by opening a new issue describing the bug or
+ feature you're intending to fix. Even if you think it's relatively minor,
+ it's helpful to know what people are working on. Mention in the initial
+ issue that you are planning to work on that bug or feature so that it can
+ be assigned to you.
+
+ 1. Follow the normal process of [forking][] the project, and set up a new
+ branch to work in. It's important that each group of changes be done in
+ separate branches in order to ensure that a pull request only includes the
+ commits related to that bug or feature.
+
+ 1. Do your best to have [well-formed commit messages][] for each change.
+ This provides consistency throughout the project, and ensures that commit
+ messages can be formatted properly by various git tools.
+
+ 1. Finally, push the commits to your fork and submit a [pull request][].
+
+[forking]: https://help.github.com/articles/fork-a-repo
+[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/benchmarks/thirdparty/benchmark/CONTRIBUTORS b/benchmarks/thirdparty/benchmark/CONTRIBUTORS
new file mode 100755
index 0000000000..6beed7166e
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/CONTRIBUTORS
@@ -0,0 +1,79 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+#
+# Names should be added to this file as:
+# Name <email address>
+#
+# Please keep the list sorted.
+ +Albert Pretorius +Alex Steele +Andriy Berestovskyy +Arne Beer +Billy Robert O'Neal III +Chris Kennelly +Christian Wassermann +Christopher Seymour +Colin Braley +Cyrille Faucheux +Daniel Harvey +David Coeurjolly +Deniz Evrenci +Dominic Hamon +Dominik Czarnota +Eric Backus +Eric Fiselier +Eugene Zhuk +Evgeny Safronov +Federico Ficarelli +Felix Homann +Geoffrey Martin-Noble +GergΕ‘ SzitΓ‘r +Hannes Hauswedell +Ismael Jimenez Martinez +Jern-Kuan Leong +JianXiong Zhou +Joao Paulo Magalhaes +John Millikin +Jordan Williams +Jussi Knuuttila +Kai Wolf +Kaito Udagawa +Kishan Kumar +Lei Xu +Matt Clarkson +Maxim Vafin +Nick Hutchinson +Oleksandr Sochka +Ori Livneh +Pascal Leroy +Paul Redmond +Pierre Phaneuf +Radoslav Yovchev +Raul Marin +Ray Glover +Robert Guo +Roman Lebedev +Sayan Bhattacharjee +Shuo Chen +Tobias UlvgΓ₯rd +Tom Madams +Yixuan Qiu +Yusuke Suzuki +Zbigniew Skowron diff --git a/benchmarks/thirdparty/benchmark/LICENSE b/benchmarks/thirdparty/benchmark/LICENSE new file mode 100755 index 0000000000..d645695673 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/benchmarks/thirdparty/benchmark/README.md b/benchmarks/thirdparty/benchmark/README.md new file mode 100755 index 0000000000..41a1bdff75 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/README.md @@ -0,0 +1,1319 @@ +# Benchmark + +[![Build Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/google/benchmark) +[![Build status](https://ci.appveyor.com/api/projects/status/u0qsyp7t1tk7cpxs/branch/master?svg=true)](https://ci.appveyor.com/project/google/benchmark/branch/master) +[![Coverage Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/benchmark) +[![slackin](https://slackin-iqtfqnpzxd.now.sh/badge.svg)](https://slackin-iqtfqnpzxd.now.sh/) + +A library to benchmark code snippets, similar to unit tests. 
Example:
+
+```c++
+#include <benchmark/benchmark.h>
+
+static void BM_SomeFunction(benchmark::State& state) {
+ // Perform setup here
+ for (auto _ : state) {
+ // This code gets timed
+ SomeFunction();
+ }
+}
+// Register the function as a benchmark
+BENCHMARK(BM_SomeFunction);
+// Run the benchmark
+BENCHMARK_MAIN();
+```
+
+To get started, see [Requirements](#requirements) and
+[Installation](#installation). See [Usage](#usage) for a full example and the
+[User Guide](#user-guide) for a more comprehensive feature overview.
+
+It may also help to read the [Google Test documentation](https://github.com/google/googletest/blob/master/googletest/docs/primer.md)
+as some of the structural aspects of the APIs are similar.
+
+### Resources
+
+[Discussion group](https://groups.google.com/d/forum/benchmark-discuss)
+
+IRC channel: [freenode](https://freenode.net) #googlebenchmark
+
+[Additional Tooling Documentation](docs/tools.md)
+
+[Assembly Testing Documentation](docs/AssemblyTests.md)
+
+## Requirements
+
+The library can be used with C++03. However, it requires C++11 to build,
+including compiler and standard library support.
+
+The following minimum versions are required to build the library:
+
+* GCC 4.8
+* Clang 3.4
+* Visual Studio 14 2015
+* Intel 2015 Update 1
+
+See [Platform-Specific Build Instructions](#platform-specific-build-instructions).
+
+## Installation
+
+This describes the installation process using cmake. As prerequisites, you'll
+need git and cmake installed.
+
+_See [dependencies.md](dependencies.md) for more details regarding supported
+versions of build tools._
+
+```bash
+# Check out the library.
+$ git clone https://github.com/google/benchmark.git
+# Benchmark requires Google Test as a dependency. Add the source tree as a subdirectory.
+$ git clone https://github.com/google/googletest.git benchmark/googletest
+# Go to the library root directory
+$ cd benchmark
+# Make a build directory to place the build output.
+$ cmake -E make_directory "build"
+# Generate build system files with cmake.
+$ cmake -E chdir "build" cmake -DCMAKE_BUILD_TYPE=Release ../
+# or, starting with CMake 3.13, use a simpler form:
+# cmake -DCMAKE_BUILD_TYPE=Release -S . -B "build"
+# Build the library.
+$ cmake --build "build" --config Release
+```
+This builds the `benchmark` and `benchmark_main` libraries and tests.
+On a Unix system, the build directory should now look something like this:
+
+```
+/benchmark
+ /build
+ /src
+ /libbenchmark.a
+ /libbenchmark_main.a
+ /test
+ ...
+```
+
+Next, you can run the tests to check the build.
+
+```bash
+$ cmake -E chdir "build" ctest --build-config Release
+```
+
+If you want to install the library globally, also run:
+
+```
+sudo cmake --build "build" --config Release --target install
+```
+
+Note that Google Benchmark requires Google Test to build and run the tests. This
+dependency can be provided in two ways:
+
+* Check out the Google Test sources into `benchmark/googletest` as above.
+* Otherwise, if `-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON` is specified during
+ configuration, the library will automatically download and build any required
+ dependencies.
+
+If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF`
+to `CMAKE_ARGS`.
+
+### Debug vs Release
+
+By default, benchmark builds as a debug library. You will see a warning in the
+output when this is the case. To build it as a release library instead, add
+`-DCMAKE_BUILD_TYPE=Release` when generating the build system files, as shown
+above.
The use of `--config Release` in build commands is needed to properly
+support multi-configuration tools (like Visual Studio for example) and can be
+skipped for other build systems (like Makefile).
+
+To enable link-time optimisation, also add `-DBENCHMARK_ENABLE_LTO=true` when
+generating the build system files.
+
+If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake
+cache variables, if autodetection fails.
+
+If you are using clang, you may need to set `LLVMAR_EXECUTABLE`,
+`LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
+
+### Stable and Experimental Library Versions
+
+The main branch contains the latest stable version of the benchmarking library,
+the API of which can be considered largely stable, with source breaking changes
+being made only upon the release of a new major version.
+
+Newer, experimental features are implemented and tested on the
+[`v2` branch](https://github.com/google/benchmark/tree/v2). Users who wish
+to use, test, and provide feedback on the new features are encouraged to try
+this branch. However, this branch provides no stability guarantees and reserves
+the right to change and break the API at any time.
+
+## Usage
+
+### Basic usage
+
+Define a function that executes the code to measure, register it as a benchmark
+function using the `BENCHMARK` macro, and ensure an appropriate `main` function
+is available:
+
+```c++
+#include <benchmark/benchmark.h>
+
+static void BM_StringCreation(benchmark::State& state) {
+ for (auto _ : state)
+ std::string empty_string;
+}
+// Register the function as a benchmark
+BENCHMARK(BM_StringCreation);
+
+// Define another benchmark
+static void BM_StringCopy(benchmark::State& state) {
+ std::string x = "hello";
+ for (auto _ : state)
+ std::string copy(x);
+}
+BENCHMARK(BM_StringCopy);
+
+BENCHMARK_MAIN();
+```
+
+To run the benchmark, compile and link against the `benchmark` library
+(libbenchmark.a/.so). If you followed the build steps above, this library will
+be under the build directory you created.
+
+```bash
+# Example on Linux after running the build steps above. Assumes the
+# `benchmark` and `build` directories are under the current directory.
+$ g++ mybenchmark.cc -std=c++11 -isystem benchmark/include \
+ -Lbenchmark/build/src -lbenchmark -lpthread -o mybenchmark
+```
+
+Alternatively, link against the `benchmark_main` library and remove
+`BENCHMARK_MAIN();` above to get the same behavior.
+
+The compiled executable will run all benchmarks by default. Pass the `--help`
+flag for option information or see the guide below.
+
+### Usage with CMake
+
+If using CMake, it is recommended to link against the project-provided
+`benchmark::benchmark` and `benchmark::benchmark_main` targets using
+`target_link_libraries`.
+It is possible to use ```find_package``` to import an installed version of the
+library.
+```cmake
+find_package(benchmark REQUIRED)
+```
+Alternatively, ```add_subdirectory``` will incorporate the library directly
+into one's CMake project.
+```cmake
+add_subdirectory(benchmark)
+```
+Either way, link to the library as follows.
+```cmake
+target_link_libraries(MyTarget benchmark::benchmark)
+```
+
+## Platform Specific Build Instructions
+
+### Building with GCC
+
+When the library is built using GCC, it is necessary to link with the pthread
+library due to how GCC implements `std::thread`. Failing to link to pthread will
+lead to runtime exceptions (unless you're using libc++), not linker errors. See
+[issue #67](https://github.com/google/benchmark/issues/67) for more details.
You
+can link to pthread by adding `-pthread` to your linker command. Note that you
+can also use `-lpthread`, but there are potential issues with ordering of
+command line parameters if you use that.
+
+### Building with Visual Studio 2015 or 2017
+
+The `shlwapi` library (`-lshlwapi`) is required to support a call to `CPUInfo` which reads the registry. Either add `shlwapi.lib` under `[ Configuration Properties > Linker > Input ]`, or use the following:
+
+```
+// Alternatively, can add libraries using linker options.
+#ifdef _WIN32
+#pragma comment ( lib, "Shlwapi.lib" )
+#ifdef _DEBUG
+#pragma comment ( lib, "benchmarkd.lib" )
+#else
+#pragma comment ( lib, "benchmark.lib" )
+#endif
+#endif
+```
+
+You can also use the graphical version of CMake:
+* Open `CMake GUI`.
+* Under `Where to build the binaries`, same path as source plus `build`.
+* Under `CMAKE_INSTALL_PREFIX`, same path as source plus `install`.
+* Click `Configure`, `Generate`, `Open Project`.
+* If the build fails, try deleting the entire directory and starting again, or unticking options to build less.
+
+### Building with Intel 2015 Update 1 or Intel System Studio Update 4
+
+See instructions for building with Visual Studio. Once built, right-click on the solution and change the build to Intel.
+
+### Building on Solaris
+
+If you're running benchmarks on Solaris, you'll want the kstat library linked in
+too (`-lkstat`).
+
+## User Guide
+
+### Command Line
+
+[Output Formats](#output-formats)
+
+[Output Files](#output-files)
+
+[Running Benchmarks](#running-benchmarks)
+
+[Running a Subset of Benchmarks](#running-a-subset-of-benchmarks)
+
+[Result Comparison](#result-comparison)
+
+### Library
+
+[Runtime and Reporting Considerations](#runtime-and-reporting-considerations)
+
+[Passing Arguments](#passing-arguments)
+
+[Calculating Asymptotic Complexity](#asymptotic-complexity)
+
+[Templated Benchmarks](#templated-benchmarks)
+
+[Fixtures](#fixtures)
+
+[Custom Counters](#custom-counters)
+
+[Multithreaded Benchmarks](#multithreaded-benchmarks)
+
+[CPU Timers](#cpu-timers)
+
+[Manual Timing](#manual-timing)
+
+[Setting the Time Unit](#setting-the-time-unit)
+
+[Preventing Optimization](#preventing-optimization)
+
+[Reporting Statistics](#reporting-statistics)
+
+[Custom Statistics](#custom-statistics)
+
+[Using RegisterBenchmark](#using-register-benchmark)
+
+[Exiting with an Error](#exiting-with-an-error)
+
+[A Faster KeepRunning Loop](#a-faster-keep-running-loop)
+
+[Disabling CPU Frequency Scaling](#disabling-cpu-frequency-scaling)
+
+
+
+### Output Formats
+
+The library supports multiple output formats. Use the
+`--benchmark_format=<console|json|csv>` flag (or set the
+`BENCHMARK_FORMAT=<console|json|csv>` environment variable) to set
+the format type. `console` is the default format.
+
+The Console format is intended to be a human-readable format. By default
+the format generates color output. Context is output on stderr and the
+tabular data on stdout. Example tabular output looks like:
+
+```
+Benchmark Time(ns) CPU(ns) Iterations
+----------------------------------------------------------------------
+BM_SetInsert/1024/1 28928 29349 23853 133.097kB/s 33.2742k items/s
+BM_SetInsert/1024/8 32065 32913 21375 949.487kB/s 237.372k items/s
+BM_SetInsert/1024/10 33157 33648 21431 1.13369MB/s 290.225k items/s
+```
+
+The JSON format outputs human-readable JSON split into two top-level attributes.
+The `context` attribute contains information about the run in general, including
+information about the CPU and the date.
+The `benchmarks` attribute contains a list of every benchmark run. Example json
+output looks like:
+
+```json
+{
+ "context": {
+ "date": "2015/03/17-18:40:25",
+ "num_cpus": 40,
+ "mhz_per_cpu": 2801,
+ "cpu_scaling_enabled": false,
+ "build_type": "debug"
+ },
+ "benchmarks": [
+ {
+ "name": "BM_SetInsert/1024/1",
+ "iterations": 94877,
+ "real_time": 29275,
+ "cpu_time": 29836,
+ "bytes_per_second": 134066,
+ "items_per_second": 33516
+ },
+ {
+ "name": "BM_SetInsert/1024/8",
+ "iterations": 21609,
+ "real_time": 32317,
+ "cpu_time": 32429,
+ "bytes_per_second": 986770,
+ "items_per_second": 246693
+ },
+ {
+ "name": "BM_SetInsert/1024/10",
+ "iterations": 21393,
+ "real_time": 32724,
+ "cpu_time": 33355,
+ "bytes_per_second": 1199226,
+ "items_per_second": 299807
+ }
+ ]
+}
+```
+
+The CSV format outputs comma-separated values. The `context` is output on stderr
+and the CSV itself on stdout. Example CSV output looks like:
+
+```
+name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label
+"BM_SetInsert/1024/1",65465,17890.7,8407.45,475768,118942,
+"BM_SetInsert/1024/8",116606,18810.1,9766.64,3.27646e+06,819115,
+"BM_SetInsert/1024/10",106365,17238.4,8421.53,4.74973e+06,1.18743e+06,
+```
+
+
+
+### Output Files
+
+Write benchmark results to a file with the `--benchmark_out=<filename>` option
+(or set `BENCHMARK_OUT`). Specify the output format with
+`--benchmark_out_format={json|console|csv}` (or set
+`BENCHMARK_OUT_FORMAT={json|console|csv}`). Note that specifying
+`--benchmark_out` does not suppress the console output.
+
+
+
+### Running Benchmarks
+
+Benchmarks are executed by running the produced binaries. Benchmark binaries,
+by default, accept options that may be specified either through their command
+line interface or by setting environment variables before execution. For every
+`--option_flag=<value>` CLI switch, a corresponding environment variable
+`OPTION_FLAG=<value>` exists and is used as the default if set (CLI switches
+ always prevail). A complete list of CLI options is available by running
+ benchmarks with the `--help` switch.
+
+
+
+### Running a Subset of Benchmarks
+
+The `--benchmark_filter=<regex>` option (or `BENCHMARK_FILTER=<regex>`
+environment variable) can be used to only run the benchmarks that match
+the specified `<regex>`. For example:
+
+```bash
+$ ./run_benchmarks.x --benchmark_filter=BM_memcpy/32
+Run on (1 X 2300 MHz CPU )
+2016-06-25 19:34:24
+Benchmark Time CPU Iterations
+----------------------------------------------------
+BM_memcpy/32 11 ns 11 ns 79545455
+BM_memcpy/32k 2181 ns 2185 ns 324074
+BM_memcpy/32 12 ns 12 ns 54687500
+BM_memcpy/32k 1834 ns 1837 ns 357143
+```
+
+
+
+### Result Comparison
+
+It is possible to compare the benchmarking results.
+See [Additional Tooling Documentation](docs/tools.md).
+
+
+
+### Runtime and Reporting Considerations
+
+When the benchmark binary is executed, each benchmark function is run serially.
+The number of iterations to run is determined dynamically by running the
+benchmark a few times, measuring the time taken, and ensuring that the
+ultimate result will be statistically stable. As such, faster benchmark
+functions will be run for more iterations than slower benchmark functions, and
+the number of iterations is thus reported.
+
+In all cases, the number of iterations for which the benchmark is run is
+governed by the amount of time the benchmark takes. Concretely, the number of
+iterations is at least one, not more than 1e9, until CPU time is greater than
+the minimum time, or the wallclock time is 5x the minimum time.
The minimum time is
+set per benchmark by calling `MinTime` on the registered benchmark object.
+
+Average timings are then reported over the iterations run. If multiple
+repetitions are requested using the `--benchmark_repetitions` command-line
+option, or at registration time, the benchmark function will be run several
+times and statistical results across these repetitions will also be reported.
+
+As well as the per-benchmark entries, a preamble in the report will include
+information about the machine on which the benchmarks are run.
+
+
+
+### Passing Arguments
+
+Sometimes a family of benchmarks can be implemented with just one routine that
+takes an extra argument to specify which one of the family of benchmarks to
+run. For example, the following code defines a family of benchmarks for
+measuring the speed of `memcpy()` calls of different lengths:
+
+```c++
+static void BM_memcpy(benchmark::State& state) {
+ char* src = new char[state.range(0)];
+ char* dst = new char[state.range(0)];
+ memset(src, 'x', state.range(0));
+ for (auto _ : state)
+ memcpy(dst, src, state.range(0));
+ state.SetBytesProcessed(int64_t(state.iterations()) *
+ int64_t(state.range(0)));
+ delete[] src;
+ delete[] dst;
+}
+BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
+```
+
+The preceding code is quite repetitive, and can be replaced with the following
+short-hand. The following invocation will pick a few appropriate arguments in
+the specified range and will generate a benchmark for each such argument.
+
+```c++
+BENCHMARK(BM_memcpy)->Range(8, 8<<10);
+```
+
+By default the arguments in the range are generated in multiples of eight and
+the command above selects [ 8, 64, 512, 4k, 8k ]. In the following code the
+range multiplier is changed to multiples of two.
+
+```c++
+BENCHMARK(BM_memcpy)->RangeMultiplier(2)->Range(8, 8<<10);
+```
+
+Now arguments generated are [ 8, 16, 32, 64, 128, 256, 512, 1024, 2k, 4k, 8k ].
+
+The preceding code shows a method of defining a sparse range. The following
+example shows a method of defining a dense range. It is then used to benchmark
+the performance of `std::vector` initialization for uniformly increasing sizes.
+
+```c++
+static void BM_DenseRange(benchmark::State& state) {
+ for(auto _ : state) {
+ std::vector<int> v(state.range(0), state.range(0));
+ benchmark::DoNotOptimize(v.data());
+ benchmark::ClobberMemory();
+ }
+}
+BENCHMARK(BM_DenseRange)->DenseRange(0, 1024, 128);
+```
+
+Now arguments generated are [ 0, 128, 256, 384, 512, 640, 768, 896, 1024 ].
+
+You might have a benchmark that depends on two or more inputs. For example, the
+following code defines a family of benchmarks for measuring the speed of set
+insertion.
+
+```c++
+static void BM_SetInsert(benchmark::State& state) {
+ std::set<int> data;
+ for (auto _ : state) {
+ state.PauseTiming();
+ data = ConstructRandomSet(state.range(0));
+ state.ResumeTiming();
+ for (int j = 0; j < state.range(1); ++j)
+ data.insert(RandomNumber());
+ }
+}
+BENCHMARK(BM_SetInsert)
+ ->Args({1<<10, 128})
+ ->Args({2<<10, 128})
+ ->Args({4<<10, 128})
+ ->Args({8<<10, 128})
+ ->Args({1<<10, 512})
+ ->Args({2<<10, 512})
+ ->Args({4<<10, 512})
+ ->Args({8<<10, 512});
+```
+
+The preceding code is quite repetitive, and can be replaced with the following
+short-hand. The following macro will pick a few appropriate arguments in the
+product of the two specified ranges and will generate a benchmark for each such
+pair.
+
+```c++
+BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
+```
+
+Some benchmarks may require specific argument values that cannot be expressed
+with `Ranges`. In this case, `ArgsProduct` offers the ability to generate a
+benchmark input for each combination in the product of the supplied vectors.
+
+```c++
+BENCHMARK(BM_SetInsert)
+ ->ArgsProduct({{1<<10, 3<<10, 8<<10}, {20, 40, 60, 80}})
+// would generate the same benchmark arguments as
+BENCHMARK(BM_SetInsert)
+ ->Args({1<<10, 20})
+ ->Args({3<<10, 20})
+ ->Args({8<<10, 20})
+ ->Args({1<<10, 40})
+ ->Args({3<<10, 40})
+ ->Args({8<<10, 40})
+ ->Args({1<<10, 60})
+ ->Args({3<<10, 60})
+ ->Args({8<<10, 60})
+ ->Args({1<<10, 80})
+ ->Args({3<<10, 80})
+ ->Args({8<<10, 80});
+```
+
+For more complex patterns of inputs, passing a custom function to `Apply` allows
+programmatic specification of an arbitrary set of arguments on which to run the
+benchmark. The following example enumerates a dense range on one parameter,
+and a sparse range on the second.
+
+```c++
+static void CustomArguments(benchmark::internal::Benchmark* b) {
+ for (int i = 0; i <= 10; ++i)
+ for (int j = 32; j <= 1024*1024; j *= 8)
+ b->Args({i, j});
+}
+BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
+```
+
+#### Passing Arbitrary Arguments to a Benchmark
+
+In C++11 it is possible to define a benchmark that takes an arbitrary number
+of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)`
+macro creates a benchmark that invokes `func` with the `benchmark::State` as
+the first argument followed by the specified `args...`.
+The `test_case_name` is appended to the name of the benchmark and
+should describe the values passed.
+
+```c++
+template <class ...ExtraArgs>
+void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
+ [...]
+}
+// Registers a benchmark named "BM_takes_args/int_string_test" that passes
+// the specified values to `extra_args`.
+BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
+```
+
+Note that elements of `...args` may refer to global variables. Users should
+avoid modifying global state inside of a benchmark.
+
+
+
+### Calculating Asymptotic Complexity (Big O)
+
+Asymptotic complexity might be calculated for a family of benchmarks. The
+following code will calculate the coefficient for the high-order term in the
+running time and the normalized root-mean-square error of string comparison.
+
+```c++
+static void BM_StringCompare(benchmark::State& state) {
+ std::string s1(state.range(0), '-');
+ std::string s2(state.range(0), '-');
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(s1.compare(s2));
+ }
+ state.SetComplexityN(state.range(0));
+}
+BENCHMARK(BM_StringCompare)
+ ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::oN);
+```
+
+As shown in the following invocation, asymptotic complexity might also be
+calculated automatically.
+
+```c++
+BENCHMARK(BM_StringCompare)
+ ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity();
+```
+
+The following code specifies asymptotic complexity with a lambda function
+that can be used to customize the high-order term calculation.
+
+```c++
+BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
+ ->Range(1<<10, 1<<18)->Complexity([](benchmark::IterationCount n)->double{return n; });
+```
+
+
+
+### Templated Benchmarks
+
+This example produces and consumes messages of size `sizeof(v)` `range_x`
+times. It also outputs throughput in the absence of multiprogramming.
+
+```c++
+template <class Q> void BM_Sequential(benchmark::State& state) {
+ Q q;
+ typename Q::value_type v;
+ for (auto _ : state) {
+ for (int i = state.range(0); i--; )
+ q.push(v);
+ for (int e = state.range(0); e--; )
+ q.Wait(&v);
+ }
+ // actually messages, not bytes:
+ state.SetBytesProcessed(
+ static_cast<int64_t>(state.iterations())*state.range(0));
+}
+BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
+```
+
+Three macros are provided for adding benchmark templates.
+
+```c++
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_TEMPLATE(func, ...) // Takes any number of parameters.
+#else // C++ < C++11
+#define BENCHMARK_TEMPLATE(func, arg1)
+#endif
+#define BENCHMARK_TEMPLATE1(func, arg1)
+#define BENCHMARK_TEMPLATE2(func, arg1, arg2)
+```
+
+
+
+### Fixtures
+
+Fixture tests are created by first defining a type that derives from
+`::benchmark::Fixture` and then creating/registering the tests using the
+following macros:
+
+* `BENCHMARK_F(ClassName, Method)`
+* `BENCHMARK_DEFINE_F(ClassName, Method)`
+* `BENCHMARK_REGISTER_F(ClassName, Method)`
+
+For example:
+
+```c++
+class MyFixture : public benchmark::Fixture {
+public:
+ void SetUp(const ::benchmark::State& state) {
+ }
+
+ void TearDown(const ::benchmark::State& state) {
+ }
+};
+
+BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
+ for (auto _ : st) {
+ ...
+ }
+}
+
+BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) {
+ for (auto _ : st) {
+ ...
+ }
+}
+/* BarTest is NOT registered */
+BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
+/* BarTest is now registered */
+```
+
+#### Templated Fixtures
+
+You can also create templated fixtures by using the following macros:
+
+* `BENCHMARK_TEMPLATE_F(ClassName, Method, ...)`
+* `BENCHMARK_TEMPLATE_DEFINE_F(ClassName, Method, ...)`
+
+For example:
+
+```c++
+template <typename T>
+class MyFixture : public benchmark::Fixture {};
+
+BENCHMARK_TEMPLATE_F(MyFixture, IntTest, int)(benchmark::State& st) {
+ for (auto _ : st) {
+ ...
+ }
+}
+
+BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, DoubleTest, double)(benchmark::State& st) {
+ for (auto _ : st) {
+ ...
+ }
+}
+
+BENCHMARK_REGISTER_F(MyFixture, DoubleTest)->Threads(2);
+```
+
+
+
+### Custom Counters
+
+You can add your own counters with user-defined names. The example below
+will add columns "Foo", "Bar" and "Baz" in its output:
+
+```c++
+static void UserCountersExample1(benchmark::State& state) {
+ double numFoos = 0, numBars = 0, numBazs = 0;
+ for (auto _ : state) {
+ // ... count Foo,Bar,Baz events
+ }
+ state.counters["Foo"] = numFoos;
+ state.counters["Bar"] = numBars;
+ state.counters["Baz"] = numBazs;
+}
+```
+
+The `state.counters` object is a `std::map` with `std::string` keys
+and `Counter` values. The latter is a `double`-like class, via an implicit
+conversion to `double&`. Thus you can use all of the standard arithmetic
+assignment operators (`=,+=,-=,*=,/=`) to change the value of each counter.
+
+In multithreaded benchmarks, each counter is set on the calling thread only.
+When the benchmark finishes, the counters from each thread will be summed;
+the resulting sum is the value which will be shown for the benchmark.
+
+The `Counter` constructor accepts three parameters: the value as a `double`
+; a bit flag which allows you to show counters as rates, and/or as per-thread
+iteration, and/or as per-thread averages, and/or iteration invariants,
+and/or finally inverting the result; and a flag specifying the 'unit' - i.e.
+is 1k a 1000 (default, `benchmark::Counter::OneK::kIs1000`), or 1024 +(`benchmark::Counter::OneK::kIs1024`)? + +```c++ + // sets a simple counter + state.counters["Foo"] = numFoos; + + // Set the counter as a rate. It will be presented divided + // by the duration of the benchmark. + // Meaning: per one second, how many 'foo's are processed? + state.counters["FooRate"] = Counter(numFoos, benchmark::Counter::kIsRate); + + // Set the counter as a rate. It will be presented divided + // by the duration of the benchmark, and the result inverted. + // Meaning: how many seconds it takes to process one 'foo'? + state.counters["FooInvRate"] = Counter(numFoos, benchmark::Counter::kIsRate | benchmark::Counter::kInvert); + + // Set the counter as a thread-average quantity. It will + // be presented divided by the number of threads. + state.counters["FooAvg"] = Counter(numFoos, benchmark::Counter::kAvgThreads); + + // There's also a combined flag: + state.counters["FooAvgRate"] = Counter(numFoos,benchmark::Counter::kAvgThreadsRate); + + // This says that we process with the rate of state.range(0) bytes every iteration: + state.counters["BytesProcessed"] = Counter(state.range(0), benchmark::Counter::kIsIterationInvariantRate, benchmark::Counter::OneK::kIs1024); +``` + +When you're compiling in C++11 mode or later you can use `insert()` with +`std::initializer_list`: + +```c++ + // With C++11, this can be done: + state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}, {"Baz", numBazs}}); + // ... instead of: + state.counters["Foo"] = numFoos; + state.counters["Bar"] = numBars; + state.counters["Baz"] = numBazs; +``` + +#### Counter Reporting + +When using the console reporter, by default, user counters are printed at +the end after the table, the same way as ``bytes_processed`` and +``items_processed``. This is best for cases in which there are few counters, +or where there are only a couple of lines per benchmark. Here's an example of +the default output: + +``` +------------------------------------------------------------------------------ +Benchmark Time CPU Iterations UserCounters... +------------------------------------------------------------------------------ +BM_UserCounter/threads:8 2248 ns 10277 ns 68808 Bar=16 Bat=40 Baz=24 Foo=8 +BM_UserCounter/threads:1 9797 ns 9788 ns 71523 Bar=2 Bat=5 Baz=3 Foo=1024m +BM_UserCounter/threads:2 4924 ns 9842 ns 71036 Bar=4 Bat=10 Baz=6 Foo=2 +BM_UserCounter/threads:4 2589 ns 10284 ns 68012 Bar=8 Bat=20 Baz=12 Foo=4 +BM_UserCounter/threads:8 2212 ns 10287 ns 68040 Bar=16 Bat=40 Baz=24 Foo=8 +BM_UserCounter/threads:16 1782 ns 10278 ns 68144 Bar=32 Bat=80 Baz=48 Foo=16 +BM_UserCounter/threads:32 1291 ns 10296 ns 68256 Bar=64 Bat=160 Baz=96 Foo=32 +BM_UserCounter/threads:4 2615 ns 10307 ns 68040 Bar=8 Bat=20 Baz=12 Foo=4 +BM_Factorial 26 ns 26 ns 26608979 40320 +BM_Factorial/real_time 26 ns 26 ns 26587936 40320 +BM_CalculatePiRange/1 16 ns 16 ns 45704255 0 +BM_CalculatePiRange/8 73 ns 73 ns 9520927 3.28374 +BM_CalculatePiRange/64 609 ns 609 ns 1140647 3.15746 +BM_CalculatePiRange/512 4900 ns 4901 ns 142696 3.14355 +``` + +If this doesn't suit you, you can print each counter as a table column by +passing the flag `--benchmark_counters_tabular=true` to the benchmark +application. This is best for cases in which there are a lot of counters, or +a lot of lines per individual benchmark. Note that this will trigger a +reprinting of the table header any time the counter set changes between +individual benchmarks. 
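+For instance, with a benchmark binary named `mybench` (an illustrative name,
+not one produced by this project), the flag is passed on the command line:
+
+```bash
+$ ./mybench --benchmark_counters_tabular=true
+```
+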
Here's an example of corresponding output when +`--benchmark_counters_tabular=true` is passed: + +``` +--------------------------------------------------------------------------------------- +Benchmark Time CPU Iterations Bar Bat Baz Foo +--------------------------------------------------------------------------------------- +BM_UserCounter/threads:8 2198 ns 9953 ns 70688 16 40 24 8 +BM_UserCounter/threads:1 9504 ns 9504 ns 73787 2 5 3 1 +BM_UserCounter/threads:2 4775 ns 9550 ns 72606 4 10 6 2 +BM_UserCounter/threads:4 2508 ns 9951 ns 70332 8 20 12 4 +BM_UserCounter/threads:8 2055 ns 9933 ns 70344 16 40 24 8 +BM_UserCounter/threads:16 1610 ns 9946 ns 70720 32 80 48 16 +BM_UserCounter/threads:32 1192 ns 9948 ns 70496 64 160 96 32 +BM_UserCounter/threads:4 2506 ns 9949 ns 70332 8 20 12 4 +-------------------------------------------------------------- +Benchmark Time CPU Iterations +-------------------------------------------------------------- +BM_Factorial 26 ns 26 ns 26392245 40320 +BM_Factorial/real_time 26 ns 26 ns 26494107 40320 +BM_CalculatePiRange/1 15 ns 15 ns 45571597 0 +BM_CalculatePiRange/8 74 ns 74 ns 9450212 3.28374 +BM_CalculatePiRange/64 595 ns 595 ns 1173901 3.15746 +BM_CalculatePiRange/512 4752 ns 4752 ns 147380 3.14355 +BM_CalculatePiRange/4k 37970 ns 37972 ns 18453 3.14184 +BM_CalculatePiRange/32k 303733 ns 303744 ns 2305 3.14162 +BM_CalculatePiRange/256k 2434095 ns 2434186 ns 288 3.1416 +BM_CalculatePiRange/1024k 9721140 ns 9721413 ns 71 3.14159 +BM_CalculatePi/threads:8 2255 ns 9943 ns 70936 +``` + +Note above the additional header printed when the benchmark changes from +``BM_UserCounter`` to ``BM_Factorial``. This is because ``BM_Factorial`` does +not have the same counter set as ``BM_UserCounter``. + + + +### Multithreaded Benchmarks + +In a multithreaded test (benchmark invoked by multiple threads simultaneously), +it is guaranteed that none of the threads will start until all have reached +the start of the benchmark loop, and all will have finished before any thread +exits the benchmark loop. (This behavior is also provided by the `KeepRunning()` +API) As such, any global setup or teardown can be wrapped in a check against the thread +index: + +```c++ +static void BM_MultiThreaded(benchmark::State& state) { + if (state.thread_index == 0) { + // Setup code here. + } + for (auto _ : state) { + // Run the test as normal. + } + if (state.thread_index == 0) { + // Teardown code here. + } +} +BENCHMARK(BM_MultiThreaded)->Threads(2); +``` + +If the benchmarked code itself uses threads and you want to compare it to +single-threaded code, you may want to use real-time ("wallclock") measurements +for latency comparisons: + +```c++ +BENCHMARK(BM_test)->Range(8, 8<<10)->UseRealTime(); +``` + +Without `UseRealTime`, CPU time is used by default. + + + +### CPU Timers + +By default, the CPU timer only measures the time spent by the main thread. +If the benchmark itself uses threads internally, this measurement may not +be what you are looking for. Instead, there is a way to measure the total +CPU usage of the process, by all the threads. + +```c++ +void callee(int i); + +static void MyMain(int size) { +#pragma omp parallel for + for(int i = 0; i < size; i++) + callee(i); +} + +static void BM_OpenMP(benchmark::State& state) { + for (auto _ : state) + MyMain(state.range(0)); +} + +// Measure the time spent by the main thread, use it to decide for how long to +// run the benchmark loop. 
Depending on internal implementation details, this may
+// measure anywhere from near-zero (the overhead spent before/after work
+// handoff to worker thread[s]) to the whole single-thread time.
+BENCHMARK(BM_OpenMP)->Range(8, 8<<10);
+
+// Measure the user-visible time, the wall clock (literally, the time that
+// has passed on the clock on the wall), use it to decide for how long to
+// run the benchmark loop. This will always be meaningful, and will match the
+// time spent by the main thread in the single-threaded case, in general
+// decreasing with the number of internal threads doing the work.
+BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->UseRealTime();
+
+// Measure the total CPU consumption, use it to decide for how long to
+// run the benchmark loop. This will always measure to no less than the
+// time spent by the main thread in the single-threaded case.
+BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->MeasureProcessCPUTime();
+
+// A mixture of the last two. Measure the total CPU consumption, but use the
+// wall clock to decide for how long to run the benchmark loop.
+BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->MeasureProcessCPUTime()->UseRealTime();
+```
+
+#### Controlling Timers
+
+Normally, the entire duration of the work loop (`for (auto _ : state) {}`)
+is measured. But sometimes it is necessary to do some work inside of
+that loop, every iteration, without counting that time toward the benchmark
+time. That is possible, although it is not recommended, since it has high
+overhead.
+
+```c++
+static void BM_SetInsert_With_Timer_Control(benchmark::State& state) {
+ std::set<int> data;
+ for (auto _ : state) {
+ state.PauseTiming(); // Stop timers. They will not count until they are resumed.
+ data = ConstructRandomSet(state.range(0)); // Do something that should not be measured
+ state.ResumeTiming(); // And resume timers. They are now counting again.
+ // The rest will be measured.
+ for (int j = 0; j < state.range(1); ++j)
+ data.insert(RandomNumber());
+ }
+}
+BENCHMARK(BM_SetInsert_With_Timer_Control)->Ranges({{1<<10, 8<<10}, {128, 512}});
+```
+
+
+
+### Manual Timing
+
+For benchmarking something for which neither CPU time nor real-time is
+correct or accurate enough, completely manual timing is supported using
+the `UseManualTime` function.
+
+When `UseManualTime` is used, the benchmarked code must call
+`SetIterationTime` once per iteration of the benchmark loop to
+report the manually measured time.
+
+An example use case for this is benchmarking GPU execution (e.g. OpenCL
+or CUDA kernels, OpenGL or Vulkan or Direct3D draw calls), which cannot
+be accurately measured using CPU time or real-time. Instead, they can be
+measured accurately using a dedicated API, and these measurement results
+can be reported back with `SetIterationTime`.
+
+```c++
+static void BM_ManualTiming(benchmark::State& state) {
+ int microseconds = state.range(0);
+ std::chrono::duration<double, std::micro> sleep_duration {
+ static_cast<double>(microseconds)
+ };
+
+ for (auto _ : state) {
+ auto start = std::chrono::high_resolution_clock::now();
+ // Simulate some useful workload with a sleep
+ std::this_thread::sleep_for(sleep_duration);
+ auto end = std::chrono::high_resolution_clock::now();
+
+ auto elapsed_seconds =
+ std::chrono::duration_cast<std::chrono::duration<double>>(
+ end - start);
+
+ state.SetIterationTime(elapsed_seconds.count());
+ }
+}
+BENCHMARK(BM_ManualTiming)->Range(1, 1<<17)->UseManualTime();
+```
+
+
+
+### Setting the Time Unit
+
+If a benchmark runs for a few milliseconds, it may be hard to visually compare
+the measured times, since the output data is given in nanoseconds by default.
+You can set the time unit manually:
+
+```c++
+BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
+```
+
+
+
+### Preventing Optimization
+
+To prevent a value or expression from being optimized away by the compiler,
+the `benchmark::DoNotOptimize(...)` and `benchmark::ClobberMemory()`
+functions can be used.
+
+```c++
+static void BM_test(benchmark::State& state) {
+ for (auto _ : state) {
+ int x = 0;
+ for (int i=0; i < 64; ++i) {
+ benchmark::DoNotOptimize(x += i);
+ }
+ }
+}
+```
+
+`DoNotOptimize(<expr>)` forces the *result* of `<expr>` to be stored in either
+memory or a register. For GNU based compilers it acts as a read/write barrier
+for global memory. More specifically it forces the compiler to flush pending
+writes to memory and reload any other values as necessary.
+
+Note that `DoNotOptimize(<expr>)` does not prevent optimizations on `<expr>`
+in any way. `<expr>` may even be removed entirely when the result is already
+known. For example:
+
+```c++
+ /* Example 1: `<expr>` is removed entirely. */
+ int foo(int x) { return x + 42; }
+ while (...) DoNotOptimize(foo(0)); // Optimized to DoNotOptimize(42);
+
+ /* Example 2: Result of '<expr>' is only reused */
+ int bar(int) __attribute__((const));
+ while (...) DoNotOptimize(bar(0)); // Optimized to:
+ // int __result__ = bar(0);
+ // while (...) DoNotOptimize(__result__);
+```
+
+The second tool for preventing optimizations is `ClobberMemory()`. In essence,
+`ClobberMemory()` forces the compiler to perform all pending writes to global
+memory. Memory managed by block scope objects must be "escaped" using
+`DoNotOptimize(...)` before it can be clobbered. In the below example
+`ClobberMemory()` prevents the call to `v.push_back(42)` from being optimized
+away.
+
+```c++
+static void BM_vector_push_back(benchmark::State& state) {
+ for (auto _ : state) {
+ std::vector<int> v;
+ v.reserve(1);
+ benchmark::DoNotOptimize(v.data()); // Allow v.data() to be clobbered.
+ v.push_back(42);
+ benchmark::ClobberMemory(); // Force 42 to be written to memory.
+ }
+}
+```
+
+Note that `ClobberMemory()` is only available for GNU- or MSVC-based compilers.
+
+
+
+### Statistics: Reporting the Mean, Median and Standard Deviation of Repeated Benchmarks
+
+By default each benchmark is run once and that single result is reported.
+However, benchmarks are often noisy and a single result may not be
+representative of the overall behavior. For this reason, it's possible to
+repeatedly rerun the benchmark.
+
+The number of runs of each benchmark is specified globally by the
+`--benchmark_repetitions` flag or on a per-benchmark basis by calling
+`Repetitions` on the registered benchmark object.
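+
+As a minimal sketch, reusing `BM_StringCompare` from the complexity section
+above (any registered benchmark works the same way), the following requests
+10 runs of a single benchmark:
+
+```c++
+// Repeat this benchmark 10 times each time the binary is executed.
+BENCHMARK(BM_StringCompare)->Repetitions(10);
+```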
When a benchmark is run more
+than once, the mean, median and standard deviation of the runs will be reported.
+
+Additionally, the `--benchmark_report_aggregates_only={true|false}`,
+`--benchmark_display_aggregates_only={true|false}` flags or the
+`ReportAggregatesOnly(bool)`, `DisplayAggregatesOnly(bool)` functions can be
+used to change how repeated tests are reported. By default the result of each
+repeated run is reported. When the `report aggregates only` option is `true`,
+only the aggregates (i.e. mean, median and standard deviation, plus complexity
+measurements if they were requested) of the runs are reported, to both
+reporters: standard output (console) and the file.
+However, when only the `display aggregates only` option is `true`,
+only the aggregates are displayed in the standard output, while the file
+output still contains everything.
+Calling `ReportAggregatesOnly(bool)` / `DisplayAggregatesOnly(bool)` on a
+registered benchmark object overrides the value of the appropriate flag for that
+benchmark.
+
+
+
+### Custom Statistics
+
+While having mean, median and standard deviation is nice, this may not be
+enough for everyone. For example, you may want to know what the largest
+observation is, e.g. because you have some real-time constraints. This is easy.
+The following code will specify a custom statistic to be calculated, defined
+by a lambda function.
+
+```c++
+void BM_spin_empty(benchmark::State& state) {
+ for (auto _ : state) {
+ for (int x = 0; x < state.range(0); ++x) {
+ benchmark::DoNotOptimize(x);
+ }
+ }
+}
+
+BENCHMARK(BM_spin_empty)
+ ->ComputeStatistics("max", [](const std::vector<double>& v) -> double {
+ return *(std::max_element(std::begin(v), std::end(v)));
+ })
+ ->Arg(512);
+```
+
+
+
+### Using RegisterBenchmark(name, fn, args...)
+
+The `RegisterBenchmark(name, func, args...)` function provides an alternative
+way to create and register benchmarks.
+`RegisterBenchmark(name, func, args...)` creates, registers, and returns a
+pointer to a new benchmark with the specified `name` that invokes
+`func(st, args...)` where `st` is a `benchmark::State` object.
+
+Unlike the `BENCHMARK` registration macros, which can only be used at global
+scope, `RegisterBenchmark` can be called anywhere. This allows for
+benchmark tests to be registered programmatically.
+
+Additionally, `RegisterBenchmark` allows any callable object to be registered
+as a benchmark, including capturing lambdas and function objects.
+
+For example:
+```c++
+auto BM_test = [](benchmark::State& st, auto Inputs) { /* ... */ };
+
+int main(int argc, char** argv) {
+ for (auto& test_input : { /* ... */ })
+ benchmark::RegisterBenchmark(test_input.name(), BM_test, test_input);
+ benchmark::Initialize(&argc, argv);
+ benchmark::RunSpecifiedBenchmarks();
+}
+```
+
+
+
+### Exiting with an Error
+
+When errors caused by external influences, such as file I/O and network
+communication, occur within a benchmark, the
+`State::SkipWithError(const char* msg)` function can be used to skip that run
+of the benchmark and report the error. Note that only future iterations of the
+`KeepRunning()` loop are skipped. For the range-based version of the benchmark
+loop, users must explicitly exit the loop; otherwise all iterations will be
+performed. Users may explicitly return to exit the benchmark immediately.
+
+The `SkipWithError(...)` function may be used at any point within the benchmark,
+including before and after the benchmark loop.
Moreover, if `SkipWithError(...)`
+has been used, it is not required to reach the benchmark loop and one may return
+from the benchmark function early.
+
+For example:
+
+```c++
+static void BM_test(benchmark::State& state) {
+ auto resource = GetResource();
+ if (!resource.good()) {
+ state.SkipWithError("Resource is not good!");
+ // KeepRunning() loop will not be entered.
+ }
+ while (state.KeepRunning()) {
+ auto data = resource.read_data();
+ if (!resource.good()) {
+ state.SkipWithError("Failed to read data!");
+ break; // Needed to skip the rest of the iteration.
+ }
+ do_stuff(data);
+ }
+}
+
+static void BM_test_ranged_fo(benchmark::State & state) {
+ auto resource = GetResource();
+ if (!resource.good()) {
+ state.SkipWithError("Resource is not good!");
+ return; // Early return is allowed when SkipWithError() has been used.
+ }
+ for (auto _ : state) {
+ auto data = resource.read_data();
+ if (!resource.good()) {
+ state.SkipWithError("Failed to read data!");
+ break; // REQUIRED to prevent all further iterations.
+ }
+ do_stuff(data);
+ }
+}
+```
+
+
+### A Faster KeepRunning Loop
+
+In C++11 mode, a range-based for loop should be used in preference to
+the `KeepRunning` loop for running the benchmarks. For example:
+
+```c++
+static void BM_Fast(benchmark::State &state) {
+ for (auto _ : state) {
+ FastOperation();
+ }
+}
+BENCHMARK(BM_Fast);
+```
+
+The range-based for loop is faster than using `KeepRunning` because
+`KeepRunning` requires a memory load and store of the iteration count on
+every iteration, whereas the range-based variant is able to keep the iteration
+count in a register.
+
+For example, an empty inner loop using the range-based for method looks like:
+
+```asm
+# Loop Init
+ mov rbx, qword ptr [r14 + 104]
+ call benchmark::State::StartKeepRunning()
+ test rbx, rbx
+ je .LoopEnd
+.LoopHeader: # =>This Inner Loop Header: Depth=1
+ add rbx, -1
+ jne .LoopHeader
+.LoopEnd:
+```
+
+Compared to an empty `KeepRunning` loop, which looks like:
+
+```asm
+.LoopHeader: # in Loop: Header=BB0_3 Depth=1
+ cmp byte ptr [rbx], 1
+ jne .LoopInit
+.LoopBody: # =>This Inner Loop Header: Depth=1
+ mov rax, qword ptr [rbx + 8]
+ lea rcx, [rax + 1]
+ mov qword ptr [rbx + 8], rcx
+ cmp rax, qword ptr [rbx + 104]
+ jb .LoopHeader
+ jmp .LoopEnd
+.LoopInit:
+ mov rdi, rbx
+ call benchmark::State::StartKeepRunning()
+ jmp .LoopBody
+.LoopEnd:
+```
+
+Unless C++03 compatibility is required, the range-based variant of writing
+the benchmark loop should be preferred.
+
+
+
+### Disabling CPU Frequency Scaling
+
+If you see this error:
+
+```
+***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will incur extra overhead.
+``` + +you might want to disable the CPU frequency scaling while running the benchmark: + +```bash +sudo cpupower frequency-set --governor performance +./mybench +sudo cpupower frequency-set --governor powersave +``` diff --git a/benchmarks/thirdparty/benchmark/WORKSPACE b/benchmarks/thirdparty/benchmark/WORKSPACE new file mode 100755 index 0000000000..c00d12cd17 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/WORKSPACE @@ -0,0 +1,36 @@ +workspace(name = "com_github_google_benchmark") + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +http_archive( + name = "rules_cc", + strip_prefix = "rules_cc-a508235df92e71d537fcbae0c7c952ea6957a912", + urls = ["https://github.com/bazelbuild/rules_cc/archive/a508235df92e71d537fcbae0c7c952ea6957a912.zip"], +) + +http_archive( + name = "com_google_absl", + sha256 = "f41868f7a938605c92936230081175d1eae87f6ea2c248f41077c8f88316f111", + strip_prefix = "abseil-cpp-20200225.2", + urls = ["https://github.com/abseil/abseil-cpp/archive/20200225.2.tar.gz"], +) + +http_archive( + name = "com_google_googletest", + strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e", + urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"], +) + +http_archive( + name = "pybind11", + build_file = "@//bindings/python:pybind11.BUILD", + sha256 = "1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d", + strip_prefix = "pybind11-2.4.3", + urls = ["https://github.com/pybind/pybind11/archive/v2.4.3.tar.gz"], +) + +new_local_repository( + name = "python_headers", + build_file = "@//bindings/python:python_headers.BUILD", + path = "/usr/include/python3.6", # May be overwritten by setup.py. +) diff --git a/benchmarks/thirdparty/benchmark/_config.yml b/benchmarks/thirdparty/benchmark/_config.yml new file mode 100755 index 0000000000..18854876c6 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/_config.yml @@ -0,0 +1 @@ +theme: jekyll-theme-midnight \ No newline at end of file diff --git a/benchmarks/thirdparty/benchmark/appveyor.yml b/benchmarks/thirdparty/benchmark/appveyor.yml new file mode 100755 index 0000000000..81da955f02 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/appveyor.yml @@ -0,0 +1,50 @@ +version: '{build}' + +image: Visual Studio 2017 + +configuration: + - Debug + - Release + +environment: + matrix: + - compiler: msvc-15-seh + generator: "Visual Studio 15 2017" + + - compiler: msvc-15-seh + generator: "Visual Studio 15 2017 Win64" + + - compiler: msvc-14-seh + generator: "Visual Studio 14 2015" + + - compiler: msvc-14-seh + generator: "Visual Studio 14 2015 Win64" + + - compiler: gcc-5.3.0-posix + generator: "MinGW Makefiles" + cxx_path: 'C:\mingw-w64\i686-5.3.0-posix-dwarf-rt_v4-rev0\mingw32\bin' + APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 + +matrix: + fast_finish: true + +install: + # git bash conflicts with MinGW makefiles + - if "%generator%"=="MinGW Makefiles" (set "PATH=%PATH:C:\Program Files\Git\usr\bin;=%") + - if not "%cxx_path%"=="" (set "PATH=%PATH%;%cxx_path%") + +build_script: + - md _build -Force + - cd _build + - echo %configuration% + - cmake -G "%generator%" "-DCMAKE_BUILD_TYPE=%configuration%" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON .. + - cmake --build . 
--config %configuration% + +test_script: + - ctest --build-config %configuration% --timeout 300 --output-on-failure + +artifacts: + - path: '_build/CMakeFiles/*.log' + name: logs + - path: '_build/Testing/**/*.xml' + name: test_results diff --git a/benchmarks/thirdparty/benchmark/bindings/python/build_defs.bzl b/benchmarks/thirdparty/benchmark/bindings/python/build_defs.bzl new file mode 100755 index 0000000000..45907aaa5e --- /dev/null +++ b/benchmarks/thirdparty/benchmark/bindings/python/build_defs.bzl @@ -0,0 +1,25 @@ +_SHARED_LIB_SUFFIX = { + "//conditions:default": ".so", + "//:windows": ".dll", +} + +def py_extension(name, srcs, hdrs = [], copts = [], features = [], deps = []): + for shared_lib_suffix in _SHARED_LIB_SUFFIX.values(): + shared_lib_name = name + shared_lib_suffix + native.cc_binary( + name = shared_lib_name, + linkshared = 1, + linkstatic = 1, + srcs = srcs + hdrs, + copts = copts, + features = features, + deps = deps, + ) + + return native.py_library( + name = name, + data = select({ + platform: [name + shared_lib_suffix] + for platform, shared_lib_suffix in _SHARED_LIB_SUFFIX.items() + }), + ) diff --git a/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/__init__.py b/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/__init__.py new file mode 100755 index 0000000000..787c423d5d --- /dev/null +++ b/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/__init__.py @@ -0,0 +1,156 @@ +# Copyright 2020 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Python benchmarking utilities. + +Example usage: + import google_benchmark as benchmark + + @benchmark.register + def my_benchmark(state): + ... # Code executed outside `while` loop is not timed. + + while state: + ... # Code executed within `while` loop is timed. + + if __name__ == '__main__': + benchmark.main() +""" + +from absl import app +from google_benchmark import _benchmark +from google_benchmark._benchmark import ( + Counter, + kNanosecond, + kMicrosecond, + kMillisecond, + oNone, + o1, + oN, + oNSquared, + oNCubed, + oLogN, + oNLogN, + oAuto, + oLambda, +) + + +__all__ = [ + "register", + "main", + "Counter", + "kNanosecond", + "kMicrosecond", + "kMillisecond", + "oNone", + "o1", + "oN", + "oNSquared", + "oNCubed", + "oLogN", + "oNLogN", + "oAuto", + "oLambda", +] + +__version__ = "0.2.0" + + +class __OptionMaker: + """A stateless class to collect benchmark options. + + Collect all decorator calls like @option.range(start=0, limit=1<<5). 
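+
+    For example, @option.arg(100) is recorded as the tuple
+    ("arg", (100,), {}) and later replayed onto the registered benchmark
+    via getattr(benchmark, "arg")(100).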
+    """
+
+    class Options:
+        """Pure data class to store option calls, along with the benchmarked function."""
+
+        def __init__(self, func):
+            self.func = func
+            self.builder_calls = []
+
+    @classmethod
+    def make(cls, func_or_options):
+        """Make Options from Options or the benchmarked function."""
+        if isinstance(func_or_options, cls.Options):
+            return func_or_options
+        return cls.Options(func_or_options)
+
+    def __getattr__(self, builder_name):
+        """Append an option call to the Options."""
+
+        # The function that gets returned on @option.range(start=0, limit=1<<5).
+        def __builder_method(*args, **kwargs):
+
+            # The decorator that gets called, either with the benchmarked
+            # function or the previous Options.
+            def __decorator(func_or_options):
+                options = self.make(func_or_options)
+                options.builder_calls.append((builder_name, args, kwargs))
+                # The decorator returns Options so it is not technically a decorator
+                # and needs a final call to @register.
+                return options
+
+            return __decorator
+
+        return __builder_method
+
+
+# Alias for a nicer API.
+# We have to instantiate an object, even if stateless, to be able to use
+# __getattr__ on option.range.
+option = __OptionMaker()
+
+
+def register(undefined=None, *, name=None):
+    """Register a function for benchmarking."""
+    if undefined is None:
+        # Decorator is called without parentheses, so we return a decorator.
+        return lambda f: register(f, name=name)
+
+    # We have either the function to benchmark (simple case) or an instance of
+    # Options (@option._ case).
+    options = __OptionMaker.make(undefined)
+
+    if name is None:
+        name = options.func.__name__
+
+    # We register the benchmark and reproduce all the @option._ calls onto the
+    # benchmark builder pattern.
+    benchmark = _benchmark.RegisterBenchmark(name, options.func)
+    for name, args, kwargs in options.builder_calls[::-1]:
+        getattr(benchmark, name)(*args, **kwargs)
+
+    # Return the benchmarked function because the decorator does not modify it.
+    return options.func
+
+
+def _flags_parser(argv):
+    argv = _benchmark.Initialize(argv)
+    return app.parse_flags_with_usage(argv)
+
+
+def _run_benchmarks(argv):
+    if len(argv) > 1:
+        raise app.UsageError("Too many command-line arguments.")
+    return _benchmark.RunSpecifiedBenchmarks()
+
+
+def main(argv=None):
+    return app.run(_run_benchmarks, argv=argv, flags_parser=_flags_parser)
+
+
+# Methods for use with a custom main function.
+initialize = _benchmark.Initialize
+run_benchmarks = _benchmark.RunSpecifiedBenchmarks
diff --git a/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/benchmark.cc b/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/benchmark.cc
new file mode 100755
index 0000000000..a733339769
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/benchmark.cc
@@ -0,0 +1,180 @@
+// Benchmark for Python.
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "pybind11/operators.h"
+#include "pybind11/pybind11.h"
+#include "pybind11/stl.h"
+#include "pybind11/stl_bind.h"
+
+#include "benchmark/benchmark.h"
+
+PYBIND11_MAKE_OPAQUE(benchmark::UserCounters);
+
+namespace {
+namespace py = ::pybind11;
+
+std::vector<std::string> Initialize(const std::vector<std::string>& argv) {
+  // The `argv` pointers here become invalid when this function returns, but
+  // benchmark holds the pointer to `argv[0]`. We create a static copy of it
+  // so it persists, and replace the pointer below.
+  static std::string executable_name(argv[0]);
+  std::vector<char*> ptrs;
+  ptrs.reserve(argv.size());
+  for (auto& arg : argv) {
+    ptrs.push_back(const_cast<char*>(arg.c_str()));
+  }
+  ptrs[0] = const_cast<char*>(executable_name.c_str());
+  int argc = static_cast<int>(argv.size());
+  benchmark::Initialize(&argc, ptrs.data());
+  std::vector<std::string> remaining_argv;
+  remaining_argv.reserve(argc);
+  for (int i = 0; i < argc; ++i) {
+    remaining_argv.emplace_back(ptrs[i]);
+  }
+  return remaining_argv;
+}
+
+benchmark::internal::Benchmark* RegisterBenchmark(const char* name,
+                                                  py::function f) {
+  return benchmark::RegisterBenchmark(
+      name, [f](benchmark::State& state) { f(&state); });
+}
+
+PYBIND11_MODULE(_benchmark, m) {
+  using benchmark::TimeUnit;
+  py::enum_<TimeUnit>(m, "TimeUnit")
+      .value("kNanosecond", TimeUnit::kNanosecond)
+      .value("kMicrosecond", TimeUnit::kMicrosecond)
+      .value("kMillisecond", TimeUnit::kMillisecond)
+      .export_values();
+
+  using benchmark::BigO;
+  py::enum_<BigO>(m, "BigO")
+      .value("oNone", BigO::oNone)
+      .value("o1", BigO::o1)
+      .value("oN", BigO::oN)
+      .value("oNSquared", BigO::oNSquared)
+      .value("oNCubed", BigO::oNCubed)
+      .value("oLogN", BigO::oLogN)
+      .value("oNLogN", BigO::oNLogN)
+      .value("oAuto", BigO::oAuto)
+      .value("oLambda", BigO::oLambda)
+      .export_values();
+
+  using benchmark::internal::Benchmark;
+  py::class_<Benchmark>(m, "Benchmark")
+      // For methods returning a pointer to the current object, the reference
+      // return policy is used to ask pybind not to take ownership of the
+      // returned object and to avoid calling delete on it.
+      // https://pybind11.readthedocs.io/en/stable/advanced/functions.html#return-value-policies
+      //
+      // For methods taking a const std::vector<...>&, a copy is created
+      // because it is bound to a Python list.
+      // https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
+      .def("unit", &Benchmark::Unit, py::return_value_policy::reference)
+      .def("arg", &Benchmark::Arg, py::return_value_policy::reference)
+      .def("args", &Benchmark::Args, py::return_value_policy::reference)
+      .def("range", &Benchmark::Range, py::return_value_policy::reference,
+           py::arg("start"), py::arg("limit"))
+      .def("dense_range", &Benchmark::DenseRange,
+           py::return_value_policy::reference, py::arg("start"),
+           py::arg("limit"), py::arg("step") = 1)
+      .def("ranges", &Benchmark::Ranges, py::return_value_policy::reference)
+      .def("args_product", &Benchmark::ArgsProduct,
+           py::return_value_policy::reference)
+      .def("arg_name", &Benchmark::ArgName, py::return_value_policy::reference)
+      .def("arg_names", &Benchmark::ArgNames,
+           py::return_value_policy::reference)
+      .def("range_pair", &Benchmark::RangePair,
+           py::return_value_policy::reference, py::arg("lo1"), py::arg("hi1"),
+           py::arg("lo2"), py::arg("hi2"))
+      .def("range_multiplier", &Benchmark::RangeMultiplier,
+           py::return_value_policy::reference)
+      .def("min_time", &Benchmark::MinTime, py::return_value_policy::reference)
+      .def("iterations", &Benchmark::Iterations,
+           py::return_value_policy::reference)
+      .def("repetitions", &Benchmark::Repetitions,
+           py::return_value_policy::reference)
+      .def("report_aggregates_only", &Benchmark::ReportAggregatesOnly,
+           py::return_value_policy::reference, py::arg("value") = true)
+      .def("display_aggregates_only", &Benchmark::DisplayAggregatesOnly,
+           py::return_value_policy::reference, py::arg("value") = true)
+      .def("measure_process_cpu_time", &Benchmark::MeasureProcessCPUTime,
+           py::return_value_policy::reference)
+      .def("use_real_time", &Benchmark::UseRealTime,
+           py::return_value_policy::reference)
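+      // Note: use_manual_time (bound next) pairs with State.set_iteration_time
+      // (bound below), letting Python code supply its own timing measurements.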
+      .def("use_manual_time", &Benchmark::UseManualTime,
+           py::return_value_policy::reference)
+      .def(
+          "complexity",
+          (Benchmark * (Benchmark::*)(benchmark::BigO)) & Benchmark::Complexity,
+          py::return_value_policy::reference,
+          py::arg("complexity") = benchmark::oAuto);
+
+  using benchmark::Counter;
+  py::class_<Counter> py_counter(m, "Counter");
+
+  py::enum_<Counter::Flags>(py_counter, "Flags")
+      .value("kDefaults", Counter::Flags::kDefaults)
+      .value("kIsRate", Counter::Flags::kIsRate)
+      .value("kAvgThreads", Counter::Flags::kAvgThreads)
+      .value("kAvgThreadsRate", Counter::Flags::kAvgThreadsRate)
+      .value("kIsIterationInvariant", Counter::Flags::kIsIterationInvariant)
+      .value("kIsIterationInvariantRate",
+             Counter::Flags::kIsIterationInvariantRate)
+      .value("kAvgIterations", Counter::Flags::kAvgIterations)
+      .value("kAvgIterationsRate", Counter::Flags::kAvgIterationsRate)
+      .value("kInvert", Counter::Flags::kInvert)
+      .export_values()
+      .def(py::self | py::self);
+
+  py::enum_<Counter::OneK>(py_counter, "OneK")
+      .value("kIs1000", Counter::OneK::kIs1000)
+      .value("kIs1024", Counter::OneK::kIs1024)
+      .export_values();
+
+  py_counter
+      .def(py::init<double, Counter::Flags, Counter::OneK>(),
+           py::arg("value") = 0., py::arg("flags") = Counter::kDefaults,
+           py::arg("k") = Counter::kIs1000)
+      .def(py::init([](double value) { return Counter(value); }))
+      .def_readwrite("value", &Counter::value)
+      .def_readwrite("flags", &Counter::flags)
+      .def_readwrite("oneK", &Counter::oneK);
+  py::implicitly_convertible<py::float_, Counter>();
+  py::implicitly_convertible<py::int_, Counter>();
+
+  py::bind_map<benchmark::UserCounters>(m, "UserCounters");
+
+  using benchmark::State;
+  py::class_<State>(m, "State")
+      .def("__bool__", &State::KeepRunning)
+      .def_property_readonly("keep_running", &State::KeepRunning)
+      .def("pause_timing", &State::PauseTiming)
+      .def("resume_timing", &State::ResumeTiming)
+      .def("skip_with_error", &State::SkipWithError)
+      .def_property_readonly("error_occured", &State::error_occurred)
+      .def("set_iteration_time", &State::SetIterationTime)
+      .def_property("bytes_processed", &State::bytes_processed,
+                    &State::SetBytesProcessed)
+      .def_property("complexity_n", &State::complexity_length_n,
+                    &State::SetComplexityN)
+      .def_property("items_processed", &State::items_processed,
+                    &State::SetItemsProcessed)
+      .def("set_label", (void (State::*)(const char*)) & State::SetLabel)
+      .def("range", &State::range, py::arg("pos") = 0)
+      .def_property_readonly("iterations", &State::iterations)
+      .def_readwrite("counters", &State::counters)
+      .def_readonly("thread_index", &State::thread_index)
+      .def_readonly("threads", &State::threads);
+
+  m.def("Initialize", Initialize);
+  m.def("RegisterBenchmark", RegisterBenchmark,
+        py::return_value_policy::reference);
+  m.def("RunSpecifiedBenchmarks",
+        []() { benchmark::RunSpecifiedBenchmarks(); });
+};
+}  // namespace
diff --git a/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/example.py b/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/example.py
new file mode 100755
index 0000000000..9134e8cffe
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/example.py
@@ -0,0 +1,136 @@
+# Copyright 2020 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Example of Python using C++ benchmark framework. + +To run this example, you must first install the `google_benchmark` Python package. + +To install using `setup.py`, download and extract the `google_benchmark` source. +In the extracted directory, execute: + python setup.py install +""" + +import random +import time + +import google_benchmark as benchmark +from google_benchmark import Counter + + +@benchmark.register +def empty(state): + while state: + pass + + +@benchmark.register +def sum_million(state): + while state: + sum(range(1_000_000)) + +@benchmark.register +def pause_timing(state): + """Pause timing every iteration.""" + while state: + # Construct a list of random ints every iteration without timing it + state.pause_timing() + random_list = [random.randint(0, 100) for _ in range(100)] + state.resume_timing() + # Time the in place sorting algorithm + random_list.sort() + + +@benchmark.register +def skipped(state): + if True: # Test some predicate here. + state.skip_with_error("some error") + return # NOTE: You must explicitly return, or benchmark will continue. + + ... # Benchmark code would be here. + + +@benchmark.register +def manual_timing(state): + while state: + # Manually count Python CPU time + start = time.perf_counter() # perf_counter_ns() in Python 3.7+ + # Something to benchmark + time.sleep(0.01) + end = time.perf_counter() + state.set_iteration_time(end - start) + + +@benchmark.register +def custom_counters(state): + """Collect cutom metric using benchmark.Counter.""" + num_foo = 0.0 + while state: + # Benchmark some code here + pass + # Collect some custom metric named foo + num_foo += 0.13 + + # Automatic Counter from numbers. + state.counters["foo"] = num_foo + # Set a counter as a rate. + state.counters["foo_rate"] = Counter(num_foo, Counter.kIsRate) + # Set a counter as an inverse of rate. + state.counters["foo_inv_rate"] = Counter(num_foo, Counter.kIsRate | Counter.kInvert) + # Set a counter as a thread-average quantity. 
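+    # (kAvgThreads divides the counter by the number of benchmark threads,
+    # reporting a per-thread average.)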
+ state.counters["foo_avg"] = Counter(num_foo, Counter.kAvgThreads) + # There's also a combined flag: + state.counters["foo_avg_rate"] = Counter(num_foo, Counter.kAvgThreadsRate) + + +@benchmark.register +@benchmark.option.measure_process_cpu_time() +@benchmark.option.use_real_time() +def with_options(state): + while state: + sum(range(1_000_000)) + + +@benchmark.register(name="sum_million_microseconds") +@benchmark.option.unit(benchmark.kMicrosecond) +def with_options(state): + while state: + sum(range(1_000_000)) + + +@benchmark.register +@benchmark.option.arg(100) +@benchmark.option.arg(1000) +def passing_argument(state): + while state: + sum(range(state.range(0))) + + +@benchmark.register +@benchmark.option.range(8, limit=8 << 10) +def using_range(state): + while state: + sum(range(state.range(0))) + + +@benchmark.register +@benchmark.option.range_multiplier(2) +@benchmark.option.range(1 << 10, 1 << 18) +@benchmark.option.complexity(benchmark.oN) +def computing_complexity(state): + while state: + sum(range(state.range(0))) + state.complexity_n = state.range(0) + + +if __name__ == "__main__": + benchmark.main() diff --git a/benchmarks/thirdparty/benchmark/bindings/python/pybind11.BUILD b/benchmarks/thirdparty/benchmark/bindings/python/pybind11.BUILD new file mode 100755 index 0000000000..bc83350038 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/bindings/python/pybind11.BUILD @@ -0,0 +1,20 @@ +cc_library( + name = "pybind11", + hdrs = glob( + include = [ + "include/pybind11/*.h", + "include/pybind11/detail/*.h", + ], + exclude = [ + "include/pybind11/common.h", + "include/pybind11/eigen.h", + ], + ), + copts = [ + "-fexceptions", + "-Wno-undefined-inline", + "-Wno-pragma-once-outside-header", + ], + includes = ["include"], + visibility = ["//visibility:public"], +) diff --git a/benchmarks/thirdparty/benchmark/bindings/python/python_headers.BUILD b/benchmarks/thirdparty/benchmark/bindings/python/python_headers.BUILD new file mode 100755 index 0000000000..9c34cf6ca4 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/bindings/python/python_headers.BUILD @@ -0,0 +1,6 @@ +cc_library( + name = "python_headers", + hdrs = glob(["**/*.h"]), + includes = ["."], + visibility = ["//visibility:public"], +) diff --git a/benchmarks/thirdparty/benchmark/bindings/python/requirements.txt b/benchmarks/thirdparty/benchmark/bindings/python/requirements.txt new file mode 100755 index 0000000000..f5bbe7eca5 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/bindings/python/requirements.txt @@ -0,0 +1,2 @@ +absl-py>=0.7.1 + diff --git a/benchmarks/thirdparty/benchmark/cmake/AddCXXCompilerFlag.cmake b/benchmarks/thirdparty/benchmark/cmake/AddCXXCompilerFlag.cmake new file mode 100755 index 0000000000..d0d2099814 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/cmake/AddCXXCompilerFlag.cmake @@ -0,0 +1,74 @@ +# - Adds a compiler flag if it is supported by the compiler +# +# This function checks that the supplied compiler flag is supported and then +# adds it to the corresponding compiler flags +# +# add_cxx_compiler_flag( []) +# +# - Example +# +# include(AddCXXCompilerFlag) +# add_cxx_compiler_flag(-Wall) +# add_cxx_compiler_flag(-no-strict-aliasing RELEASE) +# Requires CMake 2.6+ + +if(__add_cxx_compiler_flag) + return() +endif() +set(__add_cxx_compiler_flag INCLUDED) + +include(CheckCXXCompilerFlag) + +function(mangle_compiler_flag FLAG OUTPUT) + string(TOUPPER "HAVE_CXX_FLAG_${FLAG}" SANITIZED_FLAG) + string(REPLACE "+" "X" SANITIZED_FLAG ${SANITIZED_FLAG}) + string(REGEX REPLACE "[^A-Za-z_0-9]" "_" 
SANITIZED_FLAG ${SANITIZED_FLAG}) + string(REGEX REPLACE "_+" "_" SANITIZED_FLAG ${SANITIZED_FLAG}) + set(${OUTPUT} "${SANITIZED_FLAG}" PARENT_SCOPE) +endfunction(mangle_compiler_flag) + +function(add_cxx_compiler_flag FLAG) + mangle_compiler_flag("${FLAG}" MANGLED_FLAG) + set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}") + check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) + set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") + if(${MANGLED_FLAG}) + set(VARIANT ${ARGV1}) + if(ARGV1) + string(TOUPPER "_${VARIANT}" VARIANT) + endif() + set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${BENCHMARK_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) + endif() +endfunction() + +function(add_required_cxx_compiler_flag FLAG) + mangle_compiler_flag("${FLAG}" MANGLED_FLAG) + set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}") + check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) + set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") + if(${MANGLED_FLAG}) + set(VARIANT ${ARGV1}) + if(ARGV1) + string(TOUPPER "_${VARIANT}" VARIANT) + endif() + set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) + set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}" PARENT_SCOPE) + else() + message(FATAL_ERROR "Required flag '${FLAG}' is not supported by the compiler") + endif() +endfunction() + +function(check_cxx_warning_flag FLAG) + mangle_compiler_flag("${FLAG}" MANGLED_FLAG) + set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") + # Add -Werror to ensure the compiler generates an error if the warning flag + # doesn't exist. 
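+  # (Without -Werror, some compilers only emit a warning for an unknown
+  # warning flag, and the check would falsely report the flag as supported.)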
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -Werror ${FLAG}") + check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) + set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") +endfunction() diff --git a/benchmarks/thirdparty/benchmark/cmake/CXXFeatureCheck.cmake b/benchmarks/thirdparty/benchmark/cmake/CXXFeatureCheck.cmake new file mode 100755 index 0000000000..62e6741fe3 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/cmake/CXXFeatureCheck.cmake @@ -0,0 +1,69 @@ +# - Compile and run code to check for C++ features +# +# This functions compiles a source file under the `cmake` folder +# and adds the corresponding `HAVE_[FILENAME]` flag to the CMake +# environment +# +# cxx_feature_check( []) +# +# - Example +# +# include(CXXFeatureCheck) +# cxx_feature_check(STD_REGEX) +# Requires CMake 2.8.12+ + +if(__cxx_feature_check) + return() +endif() +set(__cxx_feature_check INCLUDED) + +function(cxx_feature_check FILE) + string(TOLOWER ${FILE} FILE) + string(TOUPPER ${FILE} VAR) + string(TOUPPER "HAVE_${VAR}" FEATURE) + if (DEFINED HAVE_${VAR}) + set(HAVE_${VAR} 1 PARENT_SCOPE) + add_definitions(-DHAVE_${VAR}) + return() + endif() + + if (ARGC GREATER 1) + message(STATUS "Enabling additional flags: ${ARGV1}") + list(APPEND BENCHMARK_CXX_LINKER_FLAGS ${ARGV1}) + endif() + + if (NOT DEFINED COMPILE_${FEATURE}) + message(STATUS "Performing Test ${FEATURE}") + if(CMAKE_CROSSCOMPILING) + try_compile(COMPILE_${FEATURE} + ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp + CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} + LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) + if(COMPILE_${FEATURE}) + message(WARNING + "If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0") + set(RUN_${FEATURE} 0 CACHE INTERNAL "") + else() + set(RUN_${FEATURE} 1 CACHE INTERNAL "") + endif() + else() + message(STATUS "Performing Test ${FEATURE}") + try_run(RUN_${FEATURE} COMPILE_${FEATURE} + ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp + CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} + LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) + endif() + endif() + + if(RUN_${FEATURE} EQUAL 0) + message(STATUS "Performing Test ${FEATURE} -- success") + set(HAVE_${VAR} 1 PARENT_SCOPE) + add_definitions(-DHAVE_${VAR}) + else() + if(NOT COMPILE_${FEATURE}) + message(STATUS "Performing Test ${FEATURE} -- failed to compile") + else() + message(STATUS "Performing Test ${FEATURE} -- compiled but failed to run") + endif() + endif() +endfunction() diff --git a/benchmarks/thirdparty/benchmark/cmake/Config.cmake.in b/benchmarks/thirdparty/benchmark/cmake/Config.cmake.in new file mode 100755 index 0000000000..6e9256eea8 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/cmake/Config.cmake.in @@ -0,0 +1 @@ +include("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake") diff --git a/benchmarks/thirdparty/benchmark/cmake/GetGitVersion.cmake b/benchmarks/thirdparty/benchmark/cmake/GetGitVersion.cmake new file mode 100755 index 0000000000..4f10f226d7 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/cmake/GetGitVersion.cmake @@ -0,0 +1,54 @@ +# - Returns a version string from Git tags +# +# This function inspects the annotated git tags for the project and returns a string +# into a CMake variable +# +# get_git_version() +# +# - Example +# +# include(GetGitVersion) +# get_git_version(GIT_VERSION) +# +# Requires CMake 2.8.11+ +find_package(Git) + +if(__get_git_version) + return() +endif() +set(__get_git_version INCLUDED) + +function(get_git_version var) + if(GIT_EXECUTABLE) + execute_process(COMMAND 
${GIT_EXECUTABLE} describe --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8 + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} + RESULT_VARIABLE status + OUTPUT_VARIABLE GIT_VERSION + ERROR_QUIET) + if(${status}) + set(GIT_VERSION "v0.0.0") + else() + string(STRIP ${GIT_VERSION} GIT_VERSION) + string(REGEX REPLACE "-[0-9]+-g" "-" GIT_VERSION ${GIT_VERSION}) + endif() + + # Work out if the repository is dirty + execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} + OUTPUT_QUIET + ERROR_QUIET) + execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD -- + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} + OUTPUT_VARIABLE GIT_DIFF_INDEX + ERROR_QUIET) + string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY) + if (${GIT_DIRTY}) + set(GIT_VERSION "${GIT_VERSION}-dirty") + endif() + else() + set(GIT_VERSION "v0.0.0") + endif() + + message(STATUS "git Version: ${GIT_VERSION}") + set(${var} ${GIT_VERSION} PARENT_SCOPE) +endfunction() diff --git a/benchmarks/thirdparty/benchmark/cmake/GoogleTest.cmake b/benchmarks/thirdparty/benchmark/cmake/GoogleTest.cmake new file mode 100755 index 0000000000..dd611fc875 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/cmake/GoogleTest.cmake @@ -0,0 +1,41 @@ +# Download and unpack googletest at configure time +set(GOOGLETEST_PREFIX "${benchmark_BINARY_DIR}/third_party/googletest") +configure_file(${benchmark_SOURCE_DIR}/cmake/GoogleTest.cmake.in ${GOOGLETEST_PREFIX}/CMakeLists.txt @ONLY) + +set(GOOGLETEST_PATH "${CMAKE_CURRENT_SOURCE_DIR}/googletest" CACHE PATH "") # Mind the quotes +execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" + -DALLOW_DOWNLOADING_GOOGLETEST=${BENCHMARK_DOWNLOAD_DEPENDENCIES} -DGOOGLETEST_PATH:PATH=${GOOGLETEST_PATH} . + RESULT_VARIABLE result + WORKING_DIRECTORY ${GOOGLETEST_PREFIX} +) + +if(result) + message(FATAL_ERROR "CMake step for googletest failed: ${result}") +endif() + +execute_process( + COMMAND ${CMAKE_COMMAND} --build . + RESULT_VARIABLE result + WORKING_DIRECTORY ${GOOGLETEST_PREFIX} +) + +if(result) + message(FATAL_ERROR "Build step for googletest failed: ${result}") +endif() + +# Prevent overriding the parent project's compiler/linker +# settings on Windows +set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) + +include(${GOOGLETEST_PREFIX}/googletest-paths.cmake) + +# Add googletest directly to our build. This defines +# the gtest and gtest_main targets. +add_subdirectory(${GOOGLETEST_SOURCE_DIR} + ${GOOGLETEST_BINARY_DIR} + EXCLUDE_FROM_ALL) + +set_target_properties(gtest PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $) +set_target_properties(gtest_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $) +set_target_properties(gmock PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $) +set_target_properties(gmock_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $) diff --git a/benchmarks/thirdparty/benchmark/cmake/GoogleTest.cmake.in b/benchmarks/thirdparty/benchmark/cmake/GoogleTest.cmake.in new file mode 100755 index 0000000000..fd957ff564 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/cmake/GoogleTest.cmake.in @@ -0,0 +1,58 @@ +cmake_minimum_required(VERSION 2.8.12) + +project(googletest-download NONE) + +# Enable ExternalProject CMake module +include(ExternalProject) + +option(ALLOW_DOWNLOADING_GOOGLETEST "If googletest src tree is not found in location specified by GOOGLETEST_PATH, do fetch the archive from internet" OFF) +set(GOOGLETEST_PATH "/usr/src/googletest" CACHE PATH + "Path to the googletest root tree. 
Should contain googletest and googlemock subdirs. And CMakeLists.txt in root, and in both of these subdirs") + +# Download and install GoogleTest + +message(STATUS "Looking for Google Test sources") +message(STATUS "Looking for Google Test sources in ${GOOGLETEST_PATH}") +if(EXISTS "${GOOGLETEST_PATH}" AND IS_DIRECTORY "${GOOGLETEST_PATH}" AND EXISTS "${GOOGLETEST_PATH}/CMakeLists.txt" AND + EXISTS "${GOOGLETEST_PATH}/googletest" AND IS_DIRECTORY "${GOOGLETEST_PATH}/googletest" AND EXISTS "${GOOGLETEST_PATH}/googletest/CMakeLists.txt" AND + EXISTS "${GOOGLETEST_PATH}/googlemock" AND IS_DIRECTORY "${GOOGLETEST_PATH}/googlemock" AND EXISTS "${GOOGLETEST_PATH}/googlemock/CMakeLists.txt") + message(STATUS "Found Google Test in ${GOOGLETEST_PATH}") + + ExternalProject_Add( + googletest + PREFIX "${CMAKE_BINARY_DIR}" + DOWNLOAD_DIR "${CMAKE_BINARY_DIR}/download" + SOURCE_DIR "${GOOGLETEST_PATH}" # use existing src dir. + BINARY_DIR "${CMAKE_BINARY_DIR}/build" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" + ) +else() + if(NOT ALLOW_DOWNLOADING_GOOGLETEST) + message(SEND_ERROR "Did not find Google Test sources! Either pass correct path in GOOGLETEST_PATH, or enable BENCHMARK_DOWNLOAD_DEPENDENCIES, or disable BENCHMARK_ENABLE_GTEST_TESTS / BENCHMARK_ENABLE_TESTING.") + else() + message(WARNING "Did not find Google Test sources! Fetching from web...") + ExternalProject_Add( + googletest + GIT_REPOSITORY https://github.com/google/googletest.git + GIT_TAG master + PREFIX "${CMAKE_BINARY_DIR}" + STAMP_DIR "${CMAKE_BINARY_DIR}/stamp" + DOWNLOAD_DIR "${CMAKE_BINARY_DIR}/download" + SOURCE_DIR "${CMAKE_BINARY_DIR}/src" + BINARY_DIR "${CMAKE_BINARY_DIR}/build" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" + ) + endif() +endif() + +ExternalProject_Get_Property(googletest SOURCE_DIR BINARY_DIR) +file(WRITE googletest-paths.cmake +"set(GOOGLETEST_SOURCE_DIR \"${SOURCE_DIR}\") +set(GOOGLETEST_BINARY_DIR \"${BINARY_DIR}\") +") diff --git a/benchmarks/thirdparty/benchmark/cmake/benchmark.pc.in b/benchmarks/thirdparty/benchmark/cmake/benchmark.pc.in new file mode 100755 index 0000000000..43ca8f91d7 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/cmake/benchmark.pc.in @@ -0,0 +1,12 @@ +prefix=@CMAKE_INSTALL_PREFIX@ +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: @PROJECT_NAME@ +Description: Google microbenchmark framework +Version: @VERSION@ + +Libs: -L${libdir} -lbenchmark +Libs.private: -lpthread +Cflags: -I${includedir} diff --git a/benchmarks/thirdparty/benchmark/cmake/gnu_posix_regex.cpp b/benchmarks/thirdparty/benchmark/cmake/gnu_posix_regex.cpp new file mode 100755 index 0000000000..b5b91cdab7 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/cmake/gnu_posix_regex.cpp @@ -0,0 +1,12 @@ +#include +#include +int main() { + std::string str = "test0159"; + regex_t re; + int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB); + if (ec != 0) { + return ec; + } + return regexec(&re, str.c_str(), 0, nullptr, 0) ? 
-1 : 0; +} + diff --git a/benchmarks/thirdparty/benchmark/cmake/llvm-toolchain.cmake b/benchmarks/thirdparty/benchmark/cmake/llvm-toolchain.cmake new file mode 100755 index 0000000000..fc119e52fd --- /dev/null +++ b/benchmarks/thirdparty/benchmark/cmake/llvm-toolchain.cmake @@ -0,0 +1,8 @@ +find_package(LLVMAr REQUIRED) +set(CMAKE_AR "${LLVMAR_EXECUTABLE}" CACHE FILEPATH "" FORCE) + +find_package(LLVMNm REQUIRED) +set(CMAKE_NM "${LLVMNM_EXECUTABLE}" CACHE FILEPATH "" FORCE) + +find_package(LLVMRanLib REQUIRED) +set(CMAKE_RANLIB "${LLVMRANLIB_EXECUTABLE}" CACHE FILEPATH "" FORCE) diff --git a/benchmarks/thirdparty/benchmark/cmake/posix_regex.cpp b/benchmarks/thirdparty/benchmark/cmake/posix_regex.cpp new file mode 100755 index 0000000000..466dc62560 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/cmake/posix_regex.cpp @@ -0,0 +1,14 @@ +#include +#include +int main() { + std::string str = "test0159"; + regex_t re; + int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB); + if (ec != 0) { + return ec; + } + int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0; + regfree(&re); + return ret; +} + diff --git a/benchmarks/thirdparty/benchmark/cmake/split_list.cmake b/benchmarks/thirdparty/benchmark/cmake/split_list.cmake new file mode 100755 index 0000000000..67aed3fdc8 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/cmake/split_list.cmake @@ -0,0 +1,3 @@ +macro(split_list listname) + string(REPLACE ";" " " ${listname} "${${listname}}") +endmacro() diff --git a/benchmarks/thirdparty/benchmark/cmake/std_regex.cpp b/benchmarks/thirdparty/benchmark/cmake/std_regex.cpp new file mode 100755 index 0000000000..696f2a26bc --- /dev/null +++ b/benchmarks/thirdparty/benchmark/cmake/std_regex.cpp @@ -0,0 +1,10 @@ +#include +#include +int main() { + const std::string str = "test0159"; + std::regex re; + re = std::regex("^[a-z]+[0-9]+$", + std::regex_constants::extended | std::regex_constants::nosubs); + return std::regex_search(str, re) ? 
0 : -1; +} + diff --git a/benchmarks/thirdparty/benchmark/cmake/steady_clock.cpp b/benchmarks/thirdparty/benchmark/cmake/steady_clock.cpp new file mode 100755 index 0000000000..66d50d17e9 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/cmake/steady_clock.cpp @@ -0,0 +1,7 @@ +#include + +int main() { + typedef std::chrono::steady_clock Clock; + Clock::time_point tp = Clock::now(); + ((void)tp); +} diff --git a/benchmarks/thirdparty/benchmark/cmake/thread_safety_attributes.cpp b/benchmarks/thirdparty/benchmark/cmake/thread_safety_attributes.cpp new file mode 100755 index 0000000000..46161babdb --- /dev/null +++ b/benchmarks/thirdparty/benchmark/cmake/thread_safety_attributes.cpp @@ -0,0 +1,4 @@ +#define HAVE_THREAD_SAFETY_ATTRIBUTES +#include "../src/mutex.h" + +int main() {} diff --git a/benchmarks/thirdparty/benchmark/conan/CMakeLists.txt b/benchmarks/thirdparty/benchmark/conan/CMakeLists.txt new file mode 100755 index 0000000000..15b92ca91a --- /dev/null +++ b/benchmarks/thirdparty/benchmark/conan/CMakeLists.txt @@ -0,0 +1,7 @@ +cmake_minimum_required(VERSION 2.8.11) +project(cmake_wrapper) + +include(conanbuildinfo.cmake) +conan_basic_setup() + +include(${CMAKE_SOURCE_DIR}/CMakeListsOriginal.txt) diff --git a/benchmarks/thirdparty/benchmark/conan/test_package/CMakeLists.txt b/benchmarks/thirdparty/benchmark/conan/test_package/CMakeLists.txt new file mode 100755 index 0000000000..089a6c729d --- /dev/null +++ b/benchmarks/thirdparty/benchmark/conan/test_package/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 2.8.11) +project(test_package) + +set(CMAKE_VERBOSE_MAKEFILE TRUE) + +include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) +conan_basic_setup() + +add_executable(${PROJECT_NAME} test_package.cpp) +target_link_libraries(${PROJECT_NAME} ${CONAN_LIBS}) diff --git a/benchmarks/thirdparty/benchmark/conan/test_package/conanfile.py b/benchmarks/thirdparty/benchmark/conan/test_package/conanfile.py new file mode 100755 index 0000000000..d63f4088c9 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/conan/test_package/conanfile.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from conans import ConanFile, CMake +import os + + +class TestPackageConan(ConanFile): + settings = "os", "compiler", "build_type", "arch" + generators = "cmake" + + def build(self): + cmake = CMake(self) + cmake.configure() + cmake.build() + + def test(self): + bin_path = os.path.join("bin", "test_package") + self.run(bin_path, run_environment=True) diff --git a/benchmarks/thirdparty/benchmark/conan/test_package/test_package.cpp b/benchmarks/thirdparty/benchmark/conan/test_package/test_package.cpp new file mode 100755 index 0000000000..4fa7ec0bf9 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/conan/test_package/test_package.cpp @@ -0,0 +1,18 @@ +#include "benchmark/benchmark.h" + +void BM_StringCreation(benchmark::State& state) { + while (state.KeepRunning()) + std::string empty_string; +} + +BENCHMARK(BM_StringCreation); + +void BM_StringCopy(benchmark::State& state) { + std::string x = "hello"; + while (state.KeepRunning()) + std::string copy(x); +} + +BENCHMARK(BM_StringCopy); + +BENCHMARK_MAIN(); diff --git a/benchmarks/thirdparty/benchmark/conanfile.py b/benchmarks/thirdparty/benchmark/conanfile.py new file mode 100755 index 0000000000..e31fc5268a --- /dev/null +++ b/benchmarks/thirdparty/benchmark/conanfile.py @@ -0,0 +1,79 @@ +from conans import ConanFile, CMake, tools +from conans.errors import ConanInvalidConfiguration +import shutil +import os + + +class 
GoogleBenchmarkConan(ConanFile): + name = "benchmark" + description = "A microbenchmark support library." + topics = ("conan", "benchmark", "google", "microbenchmark") + url = "https://github.com/google/benchmark" + homepage = "https://github.com/google/benchmark" + author = "Google Inc." + license = "Apache-2.0" + exports_sources = ["*"] + generators = "cmake" + + settings = "arch", "build_type", "compiler", "os" + options = { + "shared": [True, False], + "fPIC": [True, False], + "enable_lto": [True, False], + "enable_exceptions": [True, False] + } + default_options = {"shared": False, "fPIC": True, "enable_lto": False, "enable_exceptions": True} + + _build_subfolder = "." + + def source(self): + # Wrap the original CMake file to call conan_basic_setup + shutil.move("CMakeLists.txt", "CMakeListsOriginal.txt") + shutil.move(os.path.join("conan", "CMakeLists.txt"), "CMakeLists.txt") + + def config_options(self): + if self.settings.os == "Windows": + if self.settings.compiler == "Visual Studio" and float(self.settings.compiler.version.value) <= 12: + raise ConanInvalidConfiguration("{} {} does not support Visual Studio <= 12".format(self.name, self.version)) + del self.options.fPIC + + def configure(self): + if self.settings.os == "Windows" and self.options.shared: + raise ConanInvalidConfiguration("Windows shared builds are not supported right now, see issue #639") + + def _configure_cmake(self): + cmake = CMake(self) + + cmake.definitions["BENCHMARK_ENABLE_TESTING"] = "OFF" + cmake.definitions["BENCHMARK_ENABLE_GTEST_TESTS"] = "OFF" + cmake.definitions["BENCHMARK_ENABLE_LTO"] = "ON" if self.options.enable_lto else "OFF" + cmake.definitions["BENCHMARK_ENABLE_EXCEPTIONS"] = "ON" if self.options.enable_exceptions else "OFF" + + # See https://github.com/google/benchmark/pull/638 for Windows 32 build explanation + if self.settings.os != "Windows": + cmake.definitions["BENCHMARK_BUILD_32_BITS"] = "ON" if "64" not in str(self.settings.arch) else "OFF" + cmake.definitions["BENCHMARK_USE_LIBCXX"] = "ON" if (str(self.settings.compiler.libcxx) == "libc++") else "OFF" + else: + cmake.definitions["BENCHMARK_USE_LIBCXX"] = "OFF" + + cmake.configure(build_folder=self._build_subfolder) + return cmake + + def build(self): + cmake = self._configure_cmake() + cmake.build() + + def package(self): + cmake = self._configure_cmake() + cmake.install() + + self.copy(pattern="LICENSE", dst="licenses") + + def package_info(self): + self.cpp_info.libs = tools.collect_libs(self) + if self.settings.os == "Linux": + self.cpp_info.libs.extend(["pthread", "rt"]) + elif self.settings.os == "Windows": + self.cpp_info.libs.append("shlwapi") + elif self.settings.os == "SunOS": + self.cpp_info.libs.append("kstat") diff --git a/benchmarks/thirdparty/benchmark/dependencies.md b/benchmarks/thirdparty/benchmark/dependencies.md new file mode 100755 index 0000000000..6289b4e354 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/dependencies.md @@ -0,0 +1,18 @@ +# Build tool dependency policy + +To ensure the broadest compatibility when building the benchmark library, but +still allow forward progress, we require any build tooling to be available for: + +* Debian stable AND +* The last two Ubuntu LTS releases AND + +Currently, this means using build tool versions that are available for Ubuntu +16.04 (Xenial), Ubuntu 18.04 (Bionic), and Debian stretch. + +_Note, [travis](.travis.yml) runs under Ubuntu 14.04 (Trusty) for linux builds._ + +## cmake +The current supported version is cmake 3.5.1 as of 2018-06-06. 
+
+_Note, this version is also available for Ubuntu 14.04, the previous Ubuntu LTS
+release, as `cmake3`._
diff --git a/benchmarks/thirdparty/benchmark/docs/AssemblyTests.md b/benchmarks/thirdparty/benchmark/docs/AssemblyTests.md
new file mode 100755
index 0000000000..1fbdc269b5
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/docs/AssemblyTests.md
@@ -0,0 +1,147 @@
+# Assembly Tests
+
+The Benchmark library provides a number of functions whose primary
+purpose is to affect assembly generation, including `DoNotOptimize`
+and `ClobberMemory`. In addition there are other functions,
+such as `KeepRunning`, for which generating good assembly is paramount.
+
+For these functions it's important to have tests that verify the
+correctness and quality of the implementation. This requires testing
+the code generated by the compiler.
+
+This document describes how the Benchmark library tests compiler output,
+as well as how to properly write new tests.
+
+
+## Anatomy of a Test
+
+Writing a test has two steps:
+
+* Write the code you want to generate assembly for.
+* Add `// CHECK` lines to match against the verified assembly.
+
+Example:
+```c++
+
+// CHECK-LABEL: test_add:
+extern "C" int test_add() {
+    extern int ExternInt;
+    return ExternInt + 1;
+
+    // CHECK: movl ExternInt(%rip), %eax
+    // CHECK: addl %eax
+    // CHECK: ret
+}
+
+```
+
+#### LLVM Filecheck
+
+[LLVM's Filecheck](https://llvm.org/docs/CommandGuide/FileCheck.html)
+is used to test the generated assembly against the `// CHECK` lines
+specified in the test's source file. Please see the documentation
+linked above for information on how to write `CHECK` directives.
+
+#### Tips and Tricks:
+
+* Tests should match the minimal amount of output required to establish
+correctness. `CHECK` directives don't have to match on the exact next line
+after the previous match, so tests should omit checks for unimportant
+bits of assembly. ([`CHECK-NEXT`](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-next-directive)
+can be used to ensure a match occurs exactly after the previous match).
+
+* The tests are compiled with `-O3 -g0`, so we're only testing the
+optimized output.
+
+* The assembly output is further cleaned up using `tools/strip_asm.py`.
+This removes comments, assembler directives, and unused labels before
+the test is run.
+
+* The generated and stripped assembly file for a test is output under
+`<build-dir>/test/<test-name>.s`
+
+* Filecheck supports using [`CHECK` prefixes](https://llvm.org/docs/CommandGuide/FileCheck.html#cmdoption-check-prefixes)
+to specify lines that should only match in certain situations.
+The Benchmark tests use `CHECK-CLANG` and `CHECK-GNU` for lines that
+are only expected to match Clang's or GCC's output respectively. Normal
+`CHECK` lines match against all compilers. (Note: `CHECK-NOT` and
+`CHECK-LABEL` are NOT prefixes. They are versions of non-prefixed
+`CHECK` lines.)
+
+* Use `extern "C"` to disable name mangling for specific functions. This
+makes them easier to name in the `CHECK` lines.
+
+
+## Problems Writing Portable Tests
+
+Writing tests which check the code generated by a compiler is
+inherently non-portable. Different compilers and even different compiler
+versions may generate entirely different code. The Benchmark tests
+must tolerate this.
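+
+As a rough illustration, the pipeline that the test harness automates looks
+approximately like the sketch below (the file name, output paths, and check
+prefixes here are illustrative, not the actual build rules):
+
+```bash
+# Compile to assembly with the flags the tests use (-O3 -g0), strip the
+# output down, then match it against the CHECK lines in the source file.
+g++ -std=c++11 -O3 -g0 -S -Iinclude test/donotoptimize_assembly_test.cc -o /tmp/raw.s
+python tools/strip_asm.py /tmp/raw.s /tmp/stripped.s
+FileCheck --check-prefixes=CHECK,CHECK-GNU \
+    test/donotoptimize_assembly_test.cc --input-file=/tmp/stripped.s
+```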
+
+LLVM Filecheck provides a number of mechanisms to help write
+"more portable" tests, including [matching using regular expressions](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-pattern-matching-syntax),
+allowing the creation of [named variables](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-variables)
+for later matching, and [checking non-sequential matches](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-dag-directive).
+
+#### Capturing Variables
+
+For example, say GCC stores a variable in a register but Clang stores
+it in memory. To write a test that tolerates both cases we "capture"
+the destination of the store, and then use the captured expression
+to write the remainder of the test.
+
+```c++
+// CHECK-LABEL: test_div_no_op_into_shr:
+extern "C" int test_div_no_op_into_shr(int value) {
+    int divisor = 2;
+    benchmark::DoNotOptimize(divisor); // hide the value from the optimizer
+    return value / divisor;
+
+    // CHECK: movl $2, [[DEST:.*]]
+    // CHECK: idivl [[DEST]]
+    // CHECK: ret
+}
+```
+
+#### Using Regular Expressions to Match Differing Output
+
+Often tests require testing assembly lines which may subtly differ
+between compilers or compiler versions. A common example of this
+is matching stack frame addresses. In this case regular expressions
+can be used to match the differing bits of output. For example:
+
+```c++
+int ExternInt;
+struct Point { int x, y, z; };
+
+// CHECK-LABEL: test_store_point:
+extern "C" void test_store_point() {
+    Point p{ExternInt, ExternInt, ExternInt};
+    benchmark::DoNotOptimize(p);
+
+    // CHECK: movl ExternInt(%rip), %eax
+    // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
+    // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
+    // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
+    // CHECK: ret
+}
+```
+
+## Current Requirements and Limitations
+
+The tests require Filecheck to be installed somewhere on the `PATH` of the
+build machine. Otherwise the tests will be disabled.
+
+Additionally, as mentioned in the previous section, codegen tests are
+inherently non-portable. Currently the tests are limited to:
+
+* x86_64 targets.
+* Compiled with GCC or Clang.
+
+Further work could be done, at least on a limited basis, to extend the
+tests to other architectures and compilers (using `CHECK` prefixes).
+
+Furthermore, the tests fail for builds which specify additional flags
+that modify code generation, including `--coverage` or `-fsanitize=`.
+
diff --git a/benchmarks/thirdparty/benchmark/docs/_config.yml b/benchmarks/thirdparty/benchmark/docs/_config.yml
new file mode 100755
index 0000000000..18854876c6
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/docs/_config.yml
@@ -0,0 +1 @@
+theme: jekyll-theme-midnight
\ No newline at end of file
diff --git a/benchmarks/thirdparty/benchmark/docs/releasing.md b/benchmarks/thirdparty/benchmark/docs/releasing.md
new file mode 100755
index 0000000000..f0cd7010e3
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/docs/releasing.md
@@ -0,0 +1,16 @@
+# How to release
+
+* Make sure you're on master and synced to HEAD
+* Ensure the project builds and tests run (sanity check only, obviously)
+  * `parallel -j0 exec ::: test/*_test` can help ensure everything at least
+    passes
+* Prepare release notes
+  * `git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of
+    commits between the last annotated tag and HEAD
+  * Pick the most interesting.
+* Create a release through github's interface
+  * Note this will create a lightweight tag.
+  * Update this to an annotated tag:
+    * `git pull --tags`
+    * `git tag -a -f <tag> <tag>`
+    * `git push --force origin`
diff --git a/benchmarks/thirdparty/benchmark/docs/tools.md b/benchmarks/thirdparty/benchmark/docs/tools.md
new file mode 100755
index 0000000000..f2d0c497f3
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/docs/tools.md
@@ -0,0 +1,203 @@
+# Benchmark Tools
+
+## compare.py
+
+The `compare.py` script can be used to compare the results of benchmarks.
+
+### Dependencies
+The utility relies on the [scipy](https://www.scipy.org) package which can be installed using pip:
+```bash
+pip3 install -r requirements.txt
+```
+
+### Displaying aggregates only
+
+The switch `-a` / `--display_aggregates_only` can be used to control the
+display of the normal iterations vs. the aggregates. When passed, it will
+be passed through to the benchmark binaries being run, and will be accounted
+for in the tool itself; only the aggregates will be displayed, but not the
+normal runs. It only affects the display; the separate runs will still be
+used to calculate the U test.
+
+### Modes of operation
+
+There are three modes of operation:
+
+1. Just compare two benchmarks
+The program is invoked like:
+
+``` bash
+$ compare.py benchmarks <benchmark_baseline> <benchmark_contender> [benchmark options]...
+```
+Where `<benchmark_baseline>` and `<benchmark_contender>` either specify a benchmark executable file or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
+
+`[benchmark options]` will be passed to the benchmark invocations. They can be anything the binary accepts, be it normal `--benchmark_*` parameters or custom parameters your binary takes.
+
+Example output:
+```
+$ ./compare.py benchmarks ./a.out ./a.out
+RUNNING: ./a.out --benchmark_out=/tmp/tmprBT5nW
+Run on (8 X 4000 MHz CPU s)
+2017-11-07 21:16:44
+------------------------------------------------------
+Benchmark               Time           CPU Iterations
+------------------------------------------------------
+BM_memcpy/8            36 ns         36 ns   19101577   211.669MB/s
+BM_memcpy/64           76 ns         76 ns    9412571   800.199MB/s
+BM_memcpy/512          84 ns         84 ns    8249070   5.64771GB/s
+BM_memcpy/1024        116 ns        116 ns    6181763   8.19505GB/s
+BM_memcpy/8192        643 ns        643 ns    1062855   11.8636GB/s
+BM_copy/8             222 ns        222 ns    3137987   34.3772MB/s
+BM_copy/64           1608 ns       1608 ns     432758   37.9501MB/s
+BM_copy/512         12589 ns      12589 ns      54806   38.7867MB/s
+BM_copy/1024        25169 ns      25169 ns      27713   38.8003MB/s
+BM_copy/8192       201165 ns     201112 ns       3486   38.8466MB/s
+RUNNING: ./a.out --benchmark_out=/tmp/tmpt1wwG_
+Run on (8 X 4000 MHz CPU s)
+2017-11-07 21:16:53
+------------------------------------------------------
+Benchmark               Time           CPU Iterations
+------------------------------------------------------
+BM_memcpy/8            36 ns         36 ns   19397903   211.255MB/s
+BM_memcpy/64           73 ns         73 ns    9691174   839.635MB/s
+BM_memcpy/512          85 ns         85 ns    8312329   5.60101GB/s
+BM_memcpy/1024        118 ns        118 ns    6438774   8.11608GB/s
+BM_memcpy/8192        656 ns        656 ns    1068644   11.6277GB/s
+BM_copy/8             223 ns        223 ns    3146977   34.2338MB/s
+BM_copy/64           1611 ns       1611 ns     435340   37.8751MB/s
+BM_copy/512         12622 ns      12622 ns      54818   38.6844MB/s
+BM_copy/1024        25257 ns      25239 ns      27779   38.6927MB/s
+BM_copy/8192       205013 ns     205010 ns       3479    38.108MB/s
+Comparing ./a.out to ./a.out
+Benchmark                 Time             CPU      Time Old      Time New       CPU Old       CPU New
+------------------------------------------------------------------------------------------------------
+BM_memcpy/8            +0.0020         +0.0020            36            36            36            36
+BM_memcpy/64           -0.0468         -0.0470            76            73            76            73
+BM_memcpy/512          +0.0081         +0.0083            84            85            84            85
+BM_memcpy/1024         +0.0098         +0.0097           116           118           116           118
+BM_memcpy/8192         +0.0200         +0.0203           643           656           643           656
+BM_copy/8              +0.0046         +0.0042           222           223           222           223
+BM_copy/64             +0.0020         +0.0020          1608          1611          1608          1611
+BM_copy/512            +0.0027         +0.0026         12589         12622         12589         12622
+BM_copy/1024           +0.0035         +0.0028         25169         25257         25169         25239
+BM_copy/8192           +0.0191         +0.0194        201165        205013        201112        205010
+```
+
+For every benchmark from the first run, the tool looks for the benchmark with exactly the same name in the second run, and then compares the results. If the names differ, the benchmark is omitted from the diff.
+Note that the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
+
+2. Compare two different filters of one benchmark
+The program is invoked like:
+
+``` bash
+$ compare.py filters <benchmark> <filter_baseline> <filter_contender> [benchmark options]...
+```
+Where `<benchmark>` either specifies a benchmark executable file or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
+
+Where `<filter_baseline>` and `<filter_contender>` are the same regex filters that you would pass to the `[--benchmark_filter=<regex>]` parameter of the benchmark binary.
+
+`[benchmark options]` will be passed to the benchmark invocations. They can be anything the binary accepts, be it normal `--benchmark_*` parameters or custom parameters your binary takes.
+
+Example output:
+```
+$ ./compare.py filters ./a.out BM_memcpy BM_copy
+RUNNING: ./a.out --benchmark_filter=BM_memcpy --benchmark_out=/tmp/tmpBWKk0k
+Run on (8 X 4000 MHz CPU s)
+2017-11-07 21:37:28
+------------------------------------------------------
+Benchmark               Time           CPU Iterations
+------------------------------------------------------
+BM_memcpy/8            36 ns         36 ns   17891491   211.215MB/s
+BM_memcpy/64           74 ns         74 ns    9400999   825.646MB/s
+BM_memcpy/512          87 ns         87 ns    8027453   5.46126GB/s
+BM_memcpy/1024        111 ns        111 ns    6116853    8.5648GB/s
+BM_memcpy/8192        657 ns        656 ns    1064679   11.6247GB/s
+RUNNING: ./a.out --benchmark_filter=BM_copy --benchmark_out=/tmp/tmpAvWcOM
+Run on (8 X 4000 MHz CPU s)
+2017-11-07 21:37:33
+----------------------------------------------------
+Benchmark             Time           CPU Iterations
+----------------------------------------------------
+BM_copy/8           227 ns        227 ns    3038700   33.6264MB/s
+BM_copy/64         1640 ns       1640 ns     426893   37.2154MB/s
+BM_copy/512       12804 ns      12801 ns      55417   38.1444MB/s
+BM_copy/1024      25409 ns      25407 ns      27516   38.4365MB/s
+BM_copy/8192     202986 ns     202990 ns       3454   38.4871MB/s
+Comparing BM_memcpy to BM_copy (from ./a.out)
+Benchmark                               Time             CPU      Time Old      Time New       CPU Old       CPU New
+--------------------------------------------------------------------------------------------------------------------
+[BM_memcpy vs. BM_copy]/8            +5.2829         +5.2812            36           227            36           227
+[BM_memcpy vs. BM_copy]/64          +21.1719        +21.1856            74          1640            74          1640
+[BM_memcpy vs. BM_copy]/512        +145.6487       +145.6097            87         12804            87         12801
+[BM_memcpy vs. BM_copy]/1024       +227.1860       +227.1776           111         25409           111         25407
+[BM_memcpy vs. BM_copy]/8192       +308.1664       +308.2898           657        202986           656        202990
+```
+
+As you can see, the filter is applied to the benchmarks both when running them and before doing the diff. To make the diff work, the filter matches are replaced with a common string; thus, you can compare two different benchmark families within one benchmark binary.
+Note that the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
+
Compare filter one from benchmark one to filter two from benchmark two: +The program is invoked like: + +``` bash +$ compare.py filters [benchmark options]... +``` + +Where `` and `` either specify a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file. + +Where `` and `` are the same regex filters that you would pass to the `[--benchmark_filter=]` parameter of the benchmark binary. + +`[benchmark options]` will be passed to the benchmarks invocations. They can be anything that binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes. + +Example output: +``` +$ ./compare.py benchmarksfiltered ./a.out BM_memcpy ./a.out BM_copy +RUNNING: ./a.out --benchmark_filter=BM_memcpy --benchmark_out=/tmp/tmp_FvbYg +Run on (8 X 4000 MHz CPU s) +2017-11-07 21:38:27 +------------------------------------------------------ +Benchmark Time CPU Iterations +------------------------------------------------------ +BM_memcpy/8 37 ns 37 ns 18953482 204.118MB/s +BM_memcpy/64 74 ns 74 ns 9206578 828.245MB/s +BM_memcpy/512 91 ns 91 ns 8086195 5.25476GB/s +BM_memcpy/1024 120 ns 120 ns 5804513 7.95662GB/s +BM_memcpy/8192 664 ns 664 ns 1028363 11.4948GB/s +RUNNING: ./a.out --benchmark_filter=BM_copy --benchmark_out=/tmp/tmpDfL5iE +Run on (8 X 4000 MHz CPU s) +2017-11-07 21:38:32 +---------------------------------------------------- +Benchmark Time CPU Iterations +---------------------------------------------------- +BM_copy/8 230 ns 230 ns 2985909 33.1161MB/s +BM_copy/64 1654 ns 1653 ns 419408 36.9137MB/s +BM_copy/512 13122 ns 13120 ns 53403 37.2156MB/s +BM_copy/1024 26679 ns 26666 ns 26575 36.6218MB/s +BM_copy/8192 215068 ns 215053 ns 3221 36.3283MB/s +Comparing BM_memcpy (from ./a.out) to BM_copy (from ./a.out) +Benchmark Time CPU Time Old Time New CPU Old CPU New +-------------------------------------------------------------------------------------------------------------------- +[BM_memcpy vs. BM_copy]/8 +5.1649 +5.1637 37 230 37 230 +[BM_memcpy vs. BM_copy]/64 +21.4352 +21.4374 74 1654 74 1653 +[BM_memcpy vs. BM_copy]/512 +143.6022 +143.5865 91 13122 91 13120 +[BM_memcpy vs. BM_copy]/1024 +221.5903 +221.4790 120 26679 120 26666 +[BM_memcpy vs. BM_copy]/8192 +322.9059 +323.0096 664 215068 664 215053 +``` +This is a mix of the previous two modes, two (potentially different) benchmark binaries are run, and a different filter is applied to each one. +As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`. + +### U test + +If there is a sufficient repetition count of the benchmarks, the tool can do +a [U Test](https://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test), of the +null hypothesis that it is equally likely that a randomly selected value from +one sample will be less than or greater than a randomly selected value from a +second sample. + +If the calculated p-value is below this value is lower than the significance +level alpha, then the result is said to be statistically significant and the +null hypothesis is rejected. Which in other words means that the two benchmarks +aren't identical. + +**WARNING**: requires **LARGE** (no less than 9) number of repetitions to be +meaningful! 
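
Editor's note: the U test above is only meaningful when each benchmark is run with enough repetitions. The following is a minimal, hypothetical sketch (not part of this patch) of a benchmark binary that `compare.py benchmarks` could be pointed at; it reuses the `BM_memcpy` example from the library's own documentation and requests the nine repetitions the warning calls for.

```cpp
// Hypothetical example, not part of the patch: build this file twice
// (baseline and contender) and feed both binaries to compare.py.
#include <benchmark/benchmark.h>
#include <cstring>

static void BM_memcpy(benchmark::State& state) {
  char* src = new char[state.range(0)];
  char* dst = new char[state.range(0)];
  memset(src, 'x', state.range(0));
  for (auto _ : state) memcpy(dst, src, state.range(0));
  state.SetBytesProcessed(state.iterations() * state.range(0));
  delete[] src;
  delete[] dst;
}
// Nine repetitions is the documented minimum for the U test to be meaningful.
BENCHMARK(BM_memcpy)->Range(8, 8 << 10)->Repetitions(9);

BENCHMARK_MAIN();
```

Invoked as `./compare.py benchmarks ./baseline ./contender`, the tool can then run the U test across the nine repetitions of each run.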
diff --git a/benchmarks/thirdparty/benchmark/include/benchmark/benchmark.h b/benchmarks/thirdparty/benchmark/include/benchmark/benchmark.h new file mode 100755 index 0000000000..01f12620ee --- /dev/null +++ b/benchmarks/thirdparty/benchmark/include/benchmark/benchmark.h @@ -0,0 +1,1601 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Support for registering benchmarks for functions. + +/* Example usage: +// Define a function that executes the code to be measured a +// specified number of times: +static void BM_StringCreation(benchmark::State& state) { + for (auto _ : state) + std::string empty_string; +} + +// Register the function as a benchmark +BENCHMARK(BM_StringCreation); + +// Define another benchmark +static void BM_StringCopy(benchmark::State& state) { + std::string x = "hello"; + for (auto _ : state) + std::string copy(x); +} +BENCHMARK(BM_StringCopy); + +// Augment the main() program to invoke benchmarks if specified +// via the --benchmarks command line flag. E.g., +// my_unittest --benchmark_filter=all +// my_unittest --benchmark_filter=BM_StringCreation +// my_unittest --benchmark_filter=String +// my_unittest --benchmark_filter='Copy|Creation' +int main(int argc, char** argv) { + benchmark::Initialize(&argc, argv); + benchmark::RunSpecifiedBenchmarks(); + return 0; +} + +// Sometimes a family of microbenchmarks can be implemented with +// just one routine that takes an extra argument to specify which +// one of the family of benchmarks to run. For example, the following +// code defines a family of microbenchmarks for measuring the speed +// of memcpy() calls of different lengths: + +static void BM_memcpy(benchmark::State& state) { + char* src = new char[state.range(0)]; char* dst = new char[state.range(0)]; + memset(src, 'x', state.range(0)); + for (auto _ : state) + memcpy(dst, src, state.range(0)); + state.SetBytesProcessed(state.iterations() * state.range(0)); + delete[] src; delete[] dst; +} +BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10); + +// The preceding code is quite repetitive, and can be replaced with the +// following short-hand. The following invocation will pick a few +// appropriate arguments in the specified range and will generate a +// microbenchmark for each such argument. +BENCHMARK(BM_memcpy)->Range(8, 8<<10); + +// You might have a microbenchmark that depends on two inputs. For +// example, the following code defines a family of microbenchmarks for +// measuring the speed of set insertion. 
+static void BM_SetInsert(benchmark::State& state) { + set data; + for (auto _ : state) { + state.PauseTiming(); + data = ConstructRandomSet(state.range(0)); + state.ResumeTiming(); + for (int j = 0; j < state.range(1); ++j) + data.insert(RandomNumber()); + } +} +BENCHMARK(BM_SetInsert) + ->Args({1<<10, 128}) + ->Args({2<<10, 128}) + ->Args({4<<10, 128}) + ->Args({8<<10, 128}) + ->Args({1<<10, 512}) + ->Args({2<<10, 512}) + ->Args({4<<10, 512}) + ->Args({8<<10, 512}); + +// The preceding code is quite repetitive, and can be replaced with +// the following short-hand. The following macro will pick a few +// appropriate arguments in the product of the two specified ranges +// and will generate a microbenchmark for each such pair. +BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}}); + +// For more complex patterns of inputs, passing a custom function +// to Apply allows programmatic specification of an +// arbitrary set of arguments to run the microbenchmark on. +// The following example enumerates a dense range on +// one parameter, and a sparse range on the second. +static void CustomArguments(benchmark::internal::Benchmark* b) { + for (int i = 0; i <= 10; ++i) + for (int j = 32; j <= 1024*1024; j *= 8) + b->Args({i, j}); +} +BENCHMARK(BM_SetInsert)->Apply(CustomArguments); + +// Templated microbenchmarks work the same way: +// Produce then consume 'size' messages 'iters' times +// Measures throughput in the absence of multiprogramming. +template int BM_Sequential(benchmark::State& state) { + Q q; + typename Q::value_type v; + for (auto _ : state) { + for (int i = state.range(0); i--; ) + q.push(v); + for (int e = state.range(0); e--; ) + q.Wait(&v); + } + // actually messages, not bytes: + state.SetBytesProcessed(state.iterations() * state.range(0)); +} +BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue)->Range(1<<0, 1<<10); + +Use `Benchmark::MinTime(double t)` to set the minimum time used to run the +benchmark. This option overrides the `benchmark_min_time` flag. + +void BM_test(benchmark::State& state) { + ... body ... +} +BENCHMARK(BM_test)->MinTime(2.0); // Run for at least 2 seconds. + +In a multithreaded test, it is guaranteed that none of the threads will start +until all have reached the loop start, and all will have finished before any +thread exits the loop body. As such, any global setup or teardown you want to +do can be wrapped in a check against the thread index: + +static void BM_MultiThreaded(benchmark::State& state) { + if (state.thread_index == 0) { + // Setup code here. + } + for (auto _ : state) { + // Run the test as normal. + } + if (state.thread_index == 0) { + // Teardown code here. + } +} +BENCHMARK(BM_MultiThreaded)->Threads(4); + + +If a benchmark runs a few milliseconds it may be hard to visually compare the +measured times, since the output data is given in nanoseconds per default. In +order to manually set the time unit, you can specify it manually: + +BENCHMARK(BM_test)->Unit(benchmark::kMillisecond); +*/ + +#ifndef BENCHMARK_BENCHMARK_H_ +#define BENCHMARK_BENCHMARK_H_ + +// The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer. 
+#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L) +#define BENCHMARK_HAS_CXX11 +#endif + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(BENCHMARK_HAS_CXX11) +#include +#include +#include +#endif + +#if defined(_MSC_VER) +#include // for _ReadWriteBarrier +#endif + +#ifndef BENCHMARK_HAS_CXX11 +#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&); \ + TypeName& operator=(const TypeName&) +#else +#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) = delete; \ + TypeName& operator=(const TypeName&) = delete +#endif + +#if defined(__GNUC__) +#define BENCHMARK_UNUSED __attribute__((unused)) +#define BENCHMARK_ALWAYS_INLINE __attribute__((always_inline)) +#define BENCHMARK_NOEXCEPT noexcept +#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x) +#elif defined(_MSC_VER) && !defined(__clang__) +#define BENCHMARK_UNUSED +#define BENCHMARK_ALWAYS_INLINE __forceinline +#if _MSC_VER >= 1900 +#define BENCHMARK_NOEXCEPT noexcept +#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x) +#else +#define BENCHMARK_NOEXCEPT +#define BENCHMARK_NOEXCEPT_OP(x) +#endif +#define __func__ __FUNCTION__ +#else +#define BENCHMARK_UNUSED +#define BENCHMARK_ALWAYS_INLINE +#define BENCHMARK_NOEXCEPT +#define BENCHMARK_NOEXCEPT_OP(x) +#endif + +#define BENCHMARK_INTERNAL_TOSTRING2(x) #x +#define BENCHMARK_INTERNAL_TOSTRING(x) BENCHMARK_INTERNAL_TOSTRING2(x) + +#if defined(__GNUC__) || defined(__clang__) +#define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y) +#define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg))) +#else +#define BENCHMARK_BUILTIN_EXPECT(x, y) x +#define BENCHMARK_DEPRECATED_MSG(msg) +#define BENCHMARK_WARNING_MSG(msg) \ + __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING( \ + __LINE__) ") : warning note: " msg)) +#endif + +#if defined(__GNUC__) && !defined(__clang__) +#define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) +#endif + +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif + +#if defined(__GNUC__) || __has_builtin(__builtin_unreachable) +#define BENCHMARK_UNREACHABLE() __builtin_unreachable() +#elif defined(_MSC_VER) +#define BENCHMARK_UNREACHABLE() __assume(false) +#else +#define BENCHMARK_UNREACHABLE() ((void)0) +#endif + +namespace benchmark { +class BenchmarkReporter; +class MemoryManager; + +void Initialize(int* argc, char** argv); + +// Report to stdout all arguments in 'argv' as unrecognized except the first. +// Returns true there is at least on unrecognized argument (i.e. 'argc' > 1). +bool ReportUnrecognizedArguments(int argc, char** argv); + +// Generate a list of benchmarks matching the specified --benchmark_filter flag +// and if --benchmark_list_tests is specified return after printing the name +// of each matching benchmark. Otherwise run each matching benchmark and +// report the results. +// +// The second and third overload use the specified 'display_reporter' and +// 'file_reporter' respectively. 'file_reporter' will write to the file +// specified +// by '--benchmark_output'. If '--benchmark_output' is not given the +// 'file_reporter' is ignored. +// +// RETURNS: The number of matching benchmarks. 
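+//
+// Editor's illustration (hypothetical, not upstream documentation): using the
+// reporter overloads declared below, a program that prints to the console and,
+// when '--benchmark_output' is given, also writes JSON might look like:
+//
+//   benchmark::ConsoleReporter display_reporter;
+//   benchmark::JSONReporter file_reporter;
+//   benchmark::RunSpecifiedBenchmarks(&display_reporter, &file_reporter);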
+size_t RunSpecifiedBenchmarks(); +size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter); +size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, + BenchmarkReporter* file_reporter); + +// Register a MemoryManager instance that will be used to collect and report +// allocation measurements for benchmark runs. +void RegisterMemoryManager(MemoryManager* memory_manager); + +namespace internal { +class Benchmark; +class BenchmarkImp; +class BenchmarkFamilies; + +void UseCharPointer(char const volatile*); + +// Take ownership of the pointer and register the benchmark. Return the +// registered benchmark. +Benchmark* RegisterBenchmarkInternal(Benchmark*); + +// Ensure that the standard streams are properly initialized in every TU. +int InitializeStreams(); +BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams(); + +} // namespace internal + +#if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \ + defined(__EMSCRIPTEN__) +#define BENCHMARK_HAS_NO_INLINE_ASSEMBLY +#endif + +// The DoNotOptimize(...) function can be used to prevent a value or +// expression from being optimized away by the compiler. This function is +// intended to add little to no overhead. +// See: https://youtu.be/nXaxk27zwlk?t=2441 +#ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { + asm volatile("" : : "r,m"(value) : "memory"); +} + +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) { +#if defined(__clang__) + asm volatile("" : "+r,m"(value) : : "memory"); +#else + asm volatile("" : "+m,r"(value) : : "memory"); +#endif +} + +// Force the compiler to flush pending writes to global memory. Acts as an +// effective read/write barrier +inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { + asm volatile("" : : : "memory"); +} +#elif defined(_MSC_VER) +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { + internal::UseCharPointer(&reinterpret_cast(value)); + _ReadWriteBarrier(); +} + +inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { _ReadWriteBarrier(); } +#else +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { + internal::UseCharPointer(&reinterpret_cast(value)); +} +// FIXME Add ClobberMemory() for non-gnu and non-msvc compilers +#endif + +// This class is used for user-defined counters. +class Counter { + public: + enum Flags { + kDefaults = 0, + // Mark the counter as a rate. It will be presented divided + // by the duration of the benchmark. + kIsRate = 1U << 0U, + // Mark the counter as a thread-average quantity. It will be + // presented divided by the number of threads. + kAvgThreads = 1U << 1U, + // Mark the counter as a thread-average rate. See above. + kAvgThreadsRate = kIsRate | kAvgThreads, + // Mark the counter as a constant value, valid/same for *every* iteration. + // When reporting, it will be *multiplied* by the iteration count. + kIsIterationInvariant = 1U << 2U, + // Mark the counter as a constant rate. + // When reporting, it will be *multiplied* by the iteration count + // and then divided by the duration of the benchmark. + kIsIterationInvariantRate = kIsRate | kIsIterationInvariant, + // Mark the counter as a iteration-average quantity. + // It will be presented divided by the number of iterations. + kAvgIterations = 1U << 3U, + // Mark the counter as a iteration-average rate. See above. + kAvgIterationsRate = kIsRate | kAvgIterations, + + // In the end, invert the result. This is always done last! 
+ kInvert = 1U << 31U + }; + + enum OneK { + // 1'000 items per 1k + kIs1000 = 1000, + // 1'024 items per 1k + kIs1024 = 1024 + }; + + double value; + Flags flags; + OneK oneK; + + BENCHMARK_ALWAYS_INLINE + Counter(double v = 0., Flags f = kDefaults, OneK k = kIs1000) + : value(v), flags(f), oneK(k) {} + + BENCHMARK_ALWAYS_INLINE operator double const&() const { return value; } + BENCHMARK_ALWAYS_INLINE operator double&() { return value; } +}; + +// A helper for user code to create unforeseen combinations of Flags, without +// having to do this cast manually each time, or providing this operator. +Counter::Flags inline operator|(const Counter::Flags& LHS, + const Counter::Flags& RHS) { + return static_cast(static_cast(LHS) | + static_cast(RHS)); +} + +// This is the container for the user-defined counters. +typedef std::map UserCounters; + +// TimeUnit is passed to a benchmark in order to specify the order of magnitude +// for the measured time. +enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond }; + +// BigO is passed to a benchmark in order to specify the asymptotic +// computational +// complexity for the benchmark. In case oAuto is selected, complexity will be +// calculated automatically to the best fit. +enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda }; + +typedef uint64_t IterationCount; + +// BigOFunc is passed to a benchmark in order to specify the asymptotic +// computational complexity for the benchmark. +typedef double(BigOFunc)(IterationCount); + +// StatisticsFunc is passed to a benchmark in order to compute some descriptive +// statistics over all the measurements of some type +typedef double(StatisticsFunc)(const std::vector&); + +namespace internal { +struct Statistics { + std::string name_; + StatisticsFunc* compute_; + + Statistics(const std::string& name, StatisticsFunc* compute) + : name_(name), compute_(compute) {} +}; + +struct BenchmarkInstance; +class ThreadTimer; +class ThreadManager; + +enum AggregationReportMode +#if defined(BENCHMARK_HAS_CXX11) + : unsigned +#else +#endif +{ + // The mode has not been manually specified + ARM_Unspecified = 0, + // The mode is user-specified. + // This may or may not be set when the following bit-flags are set. + ARM_Default = 1U << 0U, + // File reporter should only output aggregates. + ARM_FileReportAggregatesOnly = 1U << 1U, + // Display reporter should only output aggregates + ARM_DisplayReportAggregatesOnly = 1U << 2U, + // Both reporters should only display aggregates. + ARM_ReportAggregatesOnly = + ARM_FileReportAggregatesOnly | ARM_DisplayReportAggregatesOnly +}; + +} // namespace internal + +// State is passed to a running Benchmark and contains state for the +// benchmark to use. +class State { + public: + struct StateIterator; + friend struct StateIterator; + + // Returns iterators used to run each iteration of a benchmark using a + // C++11 ranged-based for loop. These functions should not be called directly. + // + // REQUIRES: The benchmark has not started running yet. Neither begin nor end + // have been called previously. + // + // NOTE: KeepRunning may not be used after calling either of these functions. + BENCHMARK_ALWAYS_INLINE StateIterator begin(); + BENCHMARK_ALWAYS_INLINE StateIterator end(); + + // Returns true if the benchmark should continue through another iteration. + // NOTE: A benchmark may not return from the test until KeepRunning() has + // returned false. + bool KeepRunning(); + + // Returns true iff the benchmark should run n more iterations. 
+ // REQUIRES: 'n' > 0. + // NOTE: A benchmark must not return from the test until KeepRunningBatch() + // has returned false. + // NOTE: KeepRunningBatch() may overshoot by up to 'n' iterations. + // + // Intended usage: + // while (state.KeepRunningBatch(1000)) { + // // process 1000 elements + // } + bool KeepRunningBatch(IterationCount n); + + // REQUIRES: timer is running and 'SkipWithError(...)' has not been called + // by the current thread. + // Stop the benchmark timer. If not called, the timer will be + // automatically stopped after the last iteration of the benchmark loop. + // + // For threaded benchmarks the PauseTiming() function only pauses the timing + // for the current thread. + // + // NOTE: The "real time" measurement is per-thread. If different threads + // report different measurements the largest one is reported. + // + // NOTE: PauseTiming()/ResumeTiming() are relatively + // heavyweight, and so their use should generally be avoided + // within each benchmark iteration, if possible. + void PauseTiming(); + + // REQUIRES: timer is not running and 'SkipWithError(...)' has not been called + // by the current thread. + // Start the benchmark timer. The timer is NOT running on entrance to the + // benchmark function. It begins running after control flow enters the + // benchmark loop. + // + // NOTE: PauseTiming()/ResumeTiming() are relatively + // heavyweight, and so their use should generally be avoided + // within each benchmark iteration, if possible. + void ResumeTiming(); + + // REQUIRES: 'SkipWithError(...)' has not been called previously by the + // current thread. + // Report the benchmark as resulting in an error with the specified 'msg'. + // After this call the user may explicitly 'return' from the benchmark. + // + // If the ranged-for style of benchmark loop is used, the user must explicitly + // break from the loop, otherwise all future iterations will be run. + // If the 'KeepRunning()' loop is used the current thread will automatically + // exit the loop at the end of the current iteration. + // + // For threaded benchmarks only the current thread stops executing and future + // calls to `KeepRunning()` will block until all threads have completed + // the `KeepRunning()` loop. If multiple threads report an error only the + // first error message is used. + // + // NOTE: Calling 'SkipWithError(...)' does not cause the benchmark to exit + // the current scope immediately. If the function is called from within + // the 'KeepRunning()' loop the current iteration will finish. It is the users + // responsibility to exit the scope as needed. + void SkipWithError(const char* msg); + + // Returns true if an error has been reported with 'SkipWithError(...)'. + bool error_occurred() const { return error_occurred_; } + + // REQUIRES: called exactly once per iteration of the benchmarking loop. + // Set the manually measured time for this benchmark iteration, which + // is used instead of automatically measured time if UseManualTime() was + // specified. + // + // For threaded benchmarks the final value will be set to the largest + // reported values. + void SetIterationTime(double seconds); + + // Set the number of bytes processed by the current benchmark + // execution. This routine is typically called once at the end of a + // throughput oriented benchmark. + // + // REQUIRES: a benchmark has exited its benchmarking loop. 
+ BENCHMARK_ALWAYS_INLINE + void SetBytesProcessed(int64_t bytes) { + counters["bytes_per_second"] = + Counter(static_cast(bytes), Counter::kIsRate, Counter::kIs1024); + } + + BENCHMARK_ALWAYS_INLINE + int64_t bytes_processed() const { + if (counters.find("bytes_per_second") != counters.end()) + return static_cast(counters.at("bytes_per_second")); + return 0; + } + + // If this routine is called with complexity_n > 0 and complexity report is + // requested for the + // family benchmark, then current benchmark will be part of the computation + // and complexity_n will + // represent the length of N. + BENCHMARK_ALWAYS_INLINE + void SetComplexityN(int64_t complexity_n) { complexity_n_ = complexity_n; } + + BENCHMARK_ALWAYS_INLINE + int64_t complexity_length_n() const { return complexity_n_; } + + // If this routine is called with items > 0, then an items/s + // label is printed on the benchmark report line for the currently + // executing benchmark. It is typically called at the end of a processing + // benchmark where a processing items/second output is desired. + // + // REQUIRES: a benchmark has exited its benchmarking loop. + BENCHMARK_ALWAYS_INLINE + void SetItemsProcessed(int64_t items) { + counters["items_per_second"] = + Counter(static_cast(items), benchmark::Counter::kIsRate); + } + + BENCHMARK_ALWAYS_INLINE + int64_t items_processed() const { + if (counters.find("items_per_second") != counters.end()) + return static_cast(counters.at("items_per_second")); + return 0; + } + + // If this routine is called, the specified label is printed at the + // end of the benchmark report line for the currently executing + // benchmark. Example: + // static void BM_Compress(benchmark::State& state) { + // ... + // double compress = input_size / output_size; + // state.SetLabel(StrFormat("compress:%.1f%%", 100.0*compression)); + // } + // Produces output that looks like: + // BM_Compress 50 50 14115038 compress:27.3% + // + // REQUIRES: a benchmark has exited its benchmarking loop. + void SetLabel(const char* label); + + void BENCHMARK_ALWAYS_INLINE SetLabel(const std::string& str) { + this->SetLabel(str.c_str()); + } + + // Range arguments for this run. CHECKs if the argument has been set. + BENCHMARK_ALWAYS_INLINE + int64_t range(std::size_t pos = 0) const { + assert(range_.size() > pos); + return range_[pos]; + } + + BENCHMARK_DEPRECATED_MSG("use 'range(0)' instead") + int64_t range_x() const { return range(0); } + + BENCHMARK_DEPRECATED_MSG("use 'range(1)' instead") + int64_t range_y() const { return range(1); } + + BENCHMARK_ALWAYS_INLINE + IterationCount iterations() const { + if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) { + return 0; + } + return max_iterations - total_iterations_ + batch_leftover_; + } + + private + : // items we expect on the first cache line (ie 64 bytes of the struct) + // When total_iterations_ is 0, KeepRunning() and friends will return false. + // May be larger than max_iterations. + IterationCount total_iterations_; + + // When using KeepRunningBatch(), batch_leftover_ holds the number of + // iterations beyond max_iters that were run. Used to track + // completed_iterations_ accurately. + IterationCount batch_leftover_; + + public: + const IterationCount max_iterations; + + private: + bool started_; + bool finished_; + bool error_occurred_; + + private: // items we don't need on the first cache line + std::vector range_; + + int64_t complexity_n_; + + public: + // Container for user-defined counters. + UserCounters counters; + // Index of the executing thread. 
Values from [0, threads). + const int thread_index; + // Number of threads concurrently executing the benchmark. + const int threads; + + private: + State(IterationCount max_iters, const std::vector& ranges, + int thread_i, int n_threads, internal::ThreadTimer* timer, + internal::ThreadManager* manager); + + void StartKeepRunning(); + // Implementation of KeepRunning() and KeepRunningBatch(). + // is_batch must be true unless n is 1. + bool KeepRunningInternal(IterationCount n, bool is_batch); + void FinishKeepRunning(); + internal::ThreadTimer* timer_; + internal::ThreadManager* manager_; + + friend struct internal::BenchmarkInstance; +}; + +inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunning() { + return KeepRunningInternal(1, /*is_batch=*/false); +} + +inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningBatch(IterationCount n) { + return KeepRunningInternal(n, /*is_batch=*/true); +} + +inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningInternal(IterationCount n, + bool is_batch) { + // total_iterations_ is set to 0 by the constructor, and always set to a + // nonzero value by StartKepRunning(). + assert(n > 0); + // n must be 1 unless is_batch is true. + assert(is_batch || n == 1); + if (BENCHMARK_BUILTIN_EXPECT(total_iterations_ >= n, true)) { + total_iterations_ -= n; + return true; + } + if (!started_) { + StartKeepRunning(); + if (!error_occurred_ && total_iterations_ >= n) { + total_iterations_ -= n; + return true; + } + } + // For non-batch runs, total_iterations_ must be 0 by now. + if (is_batch && total_iterations_ != 0) { + batch_leftover_ = n - total_iterations_; + total_iterations_ = 0; + return true; + } + FinishKeepRunning(); + return false; +} + +struct State::StateIterator { + struct BENCHMARK_UNUSED Value {}; + typedef std::forward_iterator_tag iterator_category; + typedef Value value_type; + typedef Value reference; + typedef Value pointer; + typedef std::ptrdiff_t difference_type; + + private: + friend class State; + BENCHMARK_ALWAYS_INLINE + StateIterator() : cached_(0), parent_() {} + + BENCHMARK_ALWAYS_INLINE + explicit StateIterator(State* st) + : cached_(st->error_occurred_ ? 0 : st->max_iterations), parent_(st) {} + + public: + BENCHMARK_ALWAYS_INLINE + Value operator*() const { return Value(); } + + BENCHMARK_ALWAYS_INLINE + StateIterator& operator++() { + assert(cached_ > 0); + --cached_; + return *this; + } + + BENCHMARK_ALWAYS_INLINE + bool operator!=(StateIterator const&) const { + if (BENCHMARK_BUILTIN_EXPECT(cached_ != 0, true)) return true; + parent_->FinishKeepRunning(); + return false; + } + + private: + IterationCount cached_; + State* const parent_; +}; + +inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::begin() { + return StateIterator(this); +} +inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::end() { + StartKeepRunning(); + return StateIterator(); +} + +namespace internal { + +typedef void(Function)(State&); + +// ------------------------------------------------------ +// Benchmark registration object. The BENCHMARK() macro expands +// into an internal::Benchmark* object. Various methods can +// be called on this object to change the properties of the benchmark. +// Each method returns "this" so that multiple method calls can +// chained into one expression. +class Benchmark { + public: + virtual ~Benchmark(); + + // Note: the following methods all return "this" so that multiple + // method calls can be chained together in one expression. 
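+  //
+  // Editor's illustration (hypothetical, not upstream documentation): such a
+  // chain of the setters declared below might look like
+  //
+  //   BENCHMARK(BM_memcpy)
+  //       ->RangeMultiplier(2)
+  //       ->Range(8, 8 << 10)
+  //       ->Unit(benchmark::kMicrosecond)
+  //       ->Repetitions(9);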
+ + // Run this benchmark once with "x" as the extra argument passed + // to the function. + // REQUIRES: The function passed to the constructor must accept an arg1. + Benchmark* Arg(int64_t x); + + // Run this benchmark with the given time unit for the generated output report + Benchmark* Unit(TimeUnit unit); + + // Run this benchmark once for a number of values picked from the + // range [start..limit]. (start and limit are always picked.) + // REQUIRES: The function passed to the constructor must accept an arg1. + Benchmark* Range(int64_t start, int64_t limit); + + // Run this benchmark once for all values in the range [start..limit] with + // specific step + // REQUIRES: The function passed to the constructor must accept an arg1. + Benchmark* DenseRange(int64_t start, int64_t limit, int step = 1); + + // Run this benchmark once with "args" as the extra arguments passed + // to the function. + // REQUIRES: The function passed to the constructor must accept arg1, arg2 ... + Benchmark* Args(const std::vector& args); + + // Equivalent to Args({x, y}) + // NOTE: This is a legacy C++03 interface provided for compatibility only. + // New code should use 'Args'. + Benchmark* ArgPair(int64_t x, int64_t y) { + std::vector args; + args.push_back(x); + args.push_back(y); + return Args(args); + } + + // Run this benchmark once for a number of values picked from the + // ranges [start..limit]. (starts and limits are always picked.) + // REQUIRES: The function passed to the constructor must accept arg1, arg2 ... + Benchmark* Ranges(const std::vector >& ranges); + + // Run this benchmark once for each combination of values in the (cartesian) + // product of the supplied argument lists. + // REQUIRES: The function passed to the constructor must accept arg1, arg2 ... + Benchmark* ArgsProduct(const std::vector >& arglists); + + // Equivalent to ArgNames({name}) + Benchmark* ArgName(const std::string& name); + + // Set the argument names to display in the benchmark name. If not called, + // only argument values will be shown. + Benchmark* ArgNames(const std::vector& names); + + // Equivalent to Ranges({{lo1, hi1}, {lo2, hi2}}). + // NOTE: This is a legacy C++03 interface provided for compatibility only. + // New code should use 'Ranges'. + Benchmark* RangePair(int64_t lo1, int64_t hi1, int64_t lo2, int64_t hi2) { + std::vector > ranges; + ranges.push_back(std::make_pair(lo1, hi1)); + ranges.push_back(std::make_pair(lo2, hi2)); + return Ranges(ranges); + } + + // Pass this benchmark object to *func, which can customize + // the benchmark by calling various methods like Arg, Args, + // Threads, etc. + Benchmark* Apply(void (*func)(Benchmark* benchmark)); + + // Set the range multiplier for non-dense range. If not called, the range + // multiplier kRangeMultiplier will be used. + Benchmark* RangeMultiplier(int multiplier); + + // Set the minimum amount of time to use when running this benchmark. This + // option overrides the `benchmark_min_time` flag. + // REQUIRES: `t > 0` and `Iterations` has not been called on this benchmark. + Benchmark* MinTime(double t); + + // Specify the amount of iterations that should be run by this benchmark. + // REQUIRES: 'n > 0' and `MinTime` has not been called on this benchmark. + // + // NOTE: This function should only be used when *exact* iteration control is + // needed and never to control or limit how long a benchmark runs, where + // `--benchmark_min_time=N` or `MinTime(...)` should be used instead. 
+ Benchmark* Iterations(IterationCount n); + + // Specify the amount of times to repeat this benchmark. This option overrides + // the `benchmark_repetitions` flag. + // REQUIRES: `n > 0` + Benchmark* Repetitions(int n); + + // Specify if each repetition of the benchmark should be reported separately + // or if only the final statistics should be reported. If the benchmark + // is not repeated then the single result is always reported. + // Applies to *ALL* reporters (display and file). + Benchmark* ReportAggregatesOnly(bool value = true); + + // Same as ReportAggregatesOnly(), but applies to display reporter only. + Benchmark* DisplayAggregatesOnly(bool value = true); + + // By default, the CPU time is measured only for the main thread, which may + // be unrepresentative if the benchmark uses threads internally. If called, + // the total CPU time spent by all the threads will be measured instead. + // By default, the only the main thread CPU time will be measured. + Benchmark* MeasureProcessCPUTime(); + + // If a particular benchmark should use the Wall clock instead of the CPU time + // (be it either the CPU time of the main thread only (default), or the + // total CPU usage of the benchmark), call this method. If called, the elapsed + // (wall) time will be used to control how many iterations are run, and in the + // printing of items/second or MB/seconds values. + // If not called, the CPU time used by the benchmark will be used. + Benchmark* UseRealTime(); + + // If a benchmark must measure time manually (e.g. if GPU execution time is + // being + // measured), call this method. If called, each benchmark iteration should + // call + // SetIterationTime(seconds) to report the measured time, which will be used + // to control how many iterations are run, and in the printing of items/second + // or MB/second values. + Benchmark* UseManualTime(); + + // Set the asymptotic computational complexity for the benchmark. If called + // the asymptotic computational complexity will be shown on the output. + Benchmark* Complexity(BigO complexity = benchmark::oAuto); + + // Set the asymptotic computational complexity for the benchmark. If called + // the asymptotic computational complexity will be shown on the output. + Benchmark* Complexity(BigOFunc* complexity); + + // Add this statistics to be computed over all the values of benchmark run + Benchmark* ComputeStatistics(std::string name, StatisticsFunc* statistics); + + // Support for running multiple copies of the same benchmark concurrently + // in multiple threads. This may be useful when measuring the scaling + // of some piece of code. + + // Run one instance of this benchmark concurrently in t threads. + Benchmark* Threads(int t); + + // Pick a set of values T from [min_threads,max_threads]. + // min_threads and max_threads are always included in T. Run this + // benchmark once for each value in T. The benchmark run for a + // particular value t consists of t threads running the benchmark + // function concurrently. For example, consider: + // BENCHMARK(Foo)->ThreadRange(1,16); + // This will run the following benchmarks: + // Foo in 1 thread + // Foo in 2 threads + // Foo in 4 threads + // Foo in 8 threads + // Foo in 16 threads + Benchmark* ThreadRange(int min_threads, int max_threads); + + // For each value n in the range, run this benchmark once using n threads. + // min_threads and max_threads are always included in the range. + // stride specifies the increment. E.g. 
DenseThreadRange(1, 8, 3) starts + // a benchmark with 1, 4, 7 and 8 threads. + Benchmark* DenseThreadRange(int min_threads, int max_threads, int stride = 1); + + // Equivalent to ThreadRange(NumCPUs(), NumCPUs()) + Benchmark* ThreadPerCpu(); + + virtual void Run(State& state) = 0; + + protected: + explicit Benchmark(const char* name); + Benchmark(Benchmark const&); + void SetName(const char* name); + + int ArgsCnt() const; + + private: + friend class BenchmarkFamilies; + + std::string name_; + AggregationReportMode aggregation_report_mode_; + std::vector arg_names_; // Args for all benchmark runs + std::vector > args_; // Args for all benchmark runs + TimeUnit time_unit_; + int range_multiplier_; + double min_time_; + IterationCount iterations_; + int repetitions_; + bool measure_process_cpu_time_; + bool use_real_time_; + bool use_manual_time_; + BigO complexity_; + BigOFunc* complexity_lambda_; + std::vector statistics_; + std::vector thread_counts_; + + Benchmark& operator=(Benchmark const&); +}; + +} // namespace internal + +// Create and register a benchmark with the specified 'name' that invokes +// the specified functor 'fn'. +// +// RETURNS: A pointer to the registered benchmark. +internal::Benchmark* RegisterBenchmark(const char* name, + internal::Function* fn); + +#if defined(BENCHMARK_HAS_CXX11) +template +internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn); +#endif + +// Remove all registered benchmarks. All pointers to previously registered +// benchmarks are invalidated. +void ClearRegisteredBenchmarks(); + +namespace internal { +// The class used to hold all Benchmarks created from static function. +// (ie those created using the BENCHMARK(...) macros. +class FunctionBenchmark : public Benchmark { + public: + FunctionBenchmark(const char* name, Function* func) + : Benchmark(name), func_(func) {} + + virtual void Run(State& st); + + private: + Function* func_; +}; + +#ifdef BENCHMARK_HAS_CXX11 +template +class LambdaBenchmark : public Benchmark { + public: + virtual void Run(State& st) { lambda_(st); } + + private: + template + LambdaBenchmark(const char* name, OLambda&& lam) + : Benchmark(name), lambda_(std::forward(lam)) {} + + LambdaBenchmark(LambdaBenchmark const&) = delete; + + private: + template + friend Benchmark* ::benchmark::RegisterBenchmark(const char*, Lam&&); + + Lambda lambda_; +}; +#endif + +} // namespace internal + +inline internal::Benchmark* RegisterBenchmark(const char* name, + internal::Function* fn) { + return internal::RegisterBenchmarkInternal( + ::new internal::FunctionBenchmark(name, fn)); +} + +#ifdef BENCHMARK_HAS_CXX11 +template +internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn) { + using BenchType = + internal::LambdaBenchmark::type>; + return internal::RegisterBenchmarkInternal( + ::new BenchType(name, std::forward(fn))); +} +#endif + +#if defined(BENCHMARK_HAS_CXX11) && \ + (!defined(BENCHMARK_GCC_VERSION) || BENCHMARK_GCC_VERSION >= 409) +template +internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn, + Args&&... args) { + return benchmark::RegisterBenchmark( + name, [=](benchmark::State& st) { fn(st, args...); }); +} +#else +#define BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK +#endif + +// The base class for all fixture tests. +class Fixture : public internal::Benchmark { + public: + Fixture() : internal::Benchmark("") {} + + virtual void Run(State& st) { + this->SetUp(st); + this->BenchmarkCase(st); + this->TearDown(st); + } + + // These will be deprecated ... 
+ virtual void SetUp(const State&) {} + virtual void TearDown(const State&) {} + // ... In favor of these. + virtual void SetUp(State& st) { SetUp(const_cast(st)); } + virtual void TearDown(State& st) { TearDown(const_cast(st)); } + + protected: + virtual void BenchmarkCase(State&) = 0; +}; + +} // namespace benchmark + +// ------------------------------------------------------ +// Macro to register benchmarks + +// Check that __COUNTER__ is defined and that __COUNTER__ increases by 1 +// every time it is expanded. X + 1 == X + 0 is used in case X is defined to be +// empty. If X is empty the expression becomes (+1 == +0). +#if defined(__COUNTER__) && (__COUNTER__ + 1 == __COUNTER__ + 0) +#define BENCHMARK_PRIVATE_UNIQUE_ID __COUNTER__ +#else +#define BENCHMARK_PRIVATE_UNIQUE_ID __LINE__ +#endif + +// Helpers for generating unique variable names +#define BENCHMARK_PRIVATE_NAME(n) \ + BENCHMARK_PRIVATE_CONCAT(_benchmark_, BENCHMARK_PRIVATE_UNIQUE_ID, n) +#define BENCHMARK_PRIVATE_CONCAT(a, b, c) BENCHMARK_PRIVATE_CONCAT2(a, b, c) +#define BENCHMARK_PRIVATE_CONCAT2(a, b, c) a##b##c + +#define BENCHMARK_PRIVATE_DECLARE(n) \ + static ::benchmark::internal::Benchmark* BENCHMARK_PRIVATE_NAME(n) \ + BENCHMARK_UNUSED + +#define BENCHMARK(n) \ + BENCHMARK_PRIVATE_DECLARE(n) = \ + (::benchmark::internal::RegisterBenchmarkInternal( \ + new ::benchmark::internal::FunctionBenchmark(#n, n))) + +// Old-style macros +#define BENCHMARK_WITH_ARG(n, a) BENCHMARK(n)->Arg((a)) +#define BENCHMARK_WITH_ARG2(n, a1, a2) BENCHMARK(n)->Args({(a1), (a2)}) +#define BENCHMARK_WITH_UNIT(n, t) BENCHMARK(n)->Unit((t)) +#define BENCHMARK_RANGE(n, lo, hi) BENCHMARK(n)->Range((lo), (hi)) +#define BENCHMARK_RANGE2(n, l1, h1, l2, h2) \ + BENCHMARK(n)->RangePair({{(l1), (h1)}, {(l2), (h2)}}) + +#ifdef BENCHMARK_HAS_CXX11 + +// Register a benchmark which invokes the function specified by `func` +// with the additional arguments specified by `...`. +// +// For example: +// +// template ` +// void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) { +// [...] +//} +// /* Registers a benchmark named "BM_takes_args/int_string_test` */ +// BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc")); +#define BENCHMARK_CAPTURE(func, test_case_name, ...) \ + BENCHMARK_PRIVATE_DECLARE(func) = \ + (::benchmark::internal::RegisterBenchmarkInternal( \ + new ::benchmark::internal::FunctionBenchmark( \ + #func "/" #test_case_name, \ + [](::benchmark::State& st) { func(st, __VA_ARGS__); }))) + +#endif // BENCHMARK_HAS_CXX11 + +// This will register a benchmark for a templatized function. For example: +// +// template +// void BM_Foo(int iters); +// +// BENCHMARK_TEMPLATE(BM_Foo, 1); +// +// will register BM_Foo<1> as a benchmark. +#define BENCHMARK_TEMPLATE1(n, a) \ + BENCHMARK_PRIVATE_DECLARE(n) = \ + (::benchmark::internal::RegisterBenchmarkInternal( \ + new ::benchmark::internal::FunctionBenchmark(#n "<" #a ">", n))) + +#define BENCHMARK_TEMPLATE2(n, a, b) \ + BENCHMARK_PRIVATE_DECLARE(n) = \ + (::benchmark::internal::RegisterBenchmarkInternal( \ + new ::benchmark::internal::FunctionBenchmark(#n "<" #a "," #b ">", \ + n))) + +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK_TEMPLATE(n, ...) 
\ + BENCHMARK_PRIVATE_DECLARE(n) = \ + (::benchmark::internal::RegisterBenchmarkInternal( \ + new ::benchmark::internal::FunctionBenchmark( \ + #n "<" #__VA_ARGS__ ">", n<__VA_ARGS__>))) +#else +#define BENCHMARK_TEMPLATE(n, a) BENCHMARK_TEMPLATE1(n, a) +#endif + +#define BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ + class BaseClass##_##Method##_Benchmark : public BaseClass { \ + public: \ + BaseClass##_##Method##_Benchmark() : BaseClass() { \ + this->SetName(#BaseClass "/" #Method); \ + } \ + \ + protected: \ + virtual void BenchmarkCase(::benchmark::State&); \ + }; + +#define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ + class BaseClass##_##Method##_Benchmark : public BaseClass { \ + public: \ + BaseClass##_##Method##_Benchmark() : BaseClass() { \ + this->SetName(#BaseClass "<" #a ">/" #Method); \ + } \ + \ + protected: \ + virtual void BenchmarkCase(::benchmark::State&); \ + }; + +#define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ + class BaseClass##_##Method##_Benchmark : public BaseClass { \ + public: \ + BaseClass##_##Method##_Benchmark() : BaseClass() { \ + this->SetName(#BaseClass "<" #a "," #b ">/" #Method); \ + } \ + \ + protected: \ + virtual void BenchmarkCase(::benchmark::State&); \ + }; + +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, ...) \ + class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__> { \ + public: \ + BaseClass##_##Method##_Benchmark() : BaseClass<__VA_ARGS__>() { \ + this->SetName(#BaseClass "<" #__VA_ARGS__ ">/" #Method); \ + } \ + \ + protected: \ + virtual void BenchmarkCase(::benchmark::State&); \ + }; +#else +#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) \ + BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a) +#endif + +#define BENCHMARK_DEFINE_F(BaseClass, Method) \ + BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ + void BaseClass##_##Method##_Benchmark::BenchmarkCase + +#define BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) \ + BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ + void BaseClass##_##Method##_Benchmark::BenchmarkCase + +#define BENCHMARK_TEMPLATE2_DEFINE_F(BaseClass, Method, a, b) \ + BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ + void BaseClass##_##Method##_Benchmark::BenchmarkCase + +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, ...) \ + BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \ + void BaseClass##_##Method##_Benchmark::BenchmarkCase +#else +#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) \ + BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) +#endif + +#define BENCHMARK_REGISTER_F(BaseClass, Method) \ + BENCHMARK_PRIVATE_REGISTER_F(BaseClass##_##Method##_Benchmark) + +#define BENCHMARK_PRIVATE_REGISTER_F(TestName) \ + BENCHMARK_PRIVATE_DECLARE(TestName) = \ + (::benchmark::internal::RegisterBenchmarkInternal(new TestName())) + +// This macro will define and register a benchmark within a fixture class. 
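+//
+// Editor's illustration (hypothetical, not upstream documentation): given a
+// fixture such as
+//
+//   class MyFixture : public benchmark::Fixture {};
+//
+// the macro below both defines the benchmark case and registers it:
+//
+//   BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
+//     for (auto _ : st) {
+//       // measured code
+//     }
+//   }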
+#define BENCHMARK_F(BaseClass, Method) \ + BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ + BENCHMARK_REGISTER_F(BaseClass, Method); \ + void BaseClass##_##Method##_Benchmark::BenchmarkCase + +#define BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) \ + BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ + BENCHMARK_REGISTER_F(BaseClass, Method); \ + void BaseClass##_##Method##_Benchmark::BenchmarkCase + +#define BENCHMARK_TEMPLATE2_F(BaseClass, Method, a, b) \ + BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ + BENCHMARK_REGISTER_F(BaseClass, Method); \ + void BaseClass##_##Method##_Benchmark::BenchmarkCase + +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \ + BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \ + BENCHMARK_REGISTER_F(BaseClass, Method); \ + void BaseClass##_##Method##_Benchmark::BenchmarkCase +#else +#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) \ + BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) +#endif + +// Helper macro to create a main routine in a test that runs the benchmarks +#define BENCHMARK_MAIN() \ + int main(int argc, char** argv) { \ + ::benchmark::Initialize(&argc, argv); \ + if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1; \ + ::benchmark::RunSpecifiedBenchmarks(); \ + } \ + int main(int, char**) + +// ------------------------------------------------------ +// Benchmark Reporters + +namespace benchmark { + +struct CPUInfo { + struct CacheInfo { + std::string type; + int level; + int size; + int num_sharing; + }; + + enum Scaling { + UNKNOWN, + ENABLED, + DISABLED + }; + + int num_cpus; + double cycles_per_second; + std::vector caches; + Scaling scaling; + std::vector load_avg; + + static const CPUInfo& Get(); + + private: + CPUInfo(); + BENCHMARK_DISALLOW_COPY_AND_ASSIGN(CPUInfo); +}; + +// Adding Struct for System Information +struct SystemInfo { + std::string name; + static const SystemInfo& Get(); + + private: + SystemInfo(); + BENCHMARK_DISALLOW_COPY_AND_ASSIGN(SystemInfo); +}; + +// BenchmarkName contains the components of the Benchmark's name +// which allows individual fields to be modified or cleared before +// building the final name using 'str()'. +struct BenchmarkName { + std::string function_name; + std::string args; + std::string min_time; + std::string iterations; + std::string repetitions; + std::string time_type; + std::string threads; + + // Return the full name of the benchmark with each non-empty + // field separated by a '/' + std::string str() const; +}; + +// Interface for custom benchmark result printers. +// By default, benchmark reports are printed to stdout. However an application +// can control the destination of the reports by calling +// RunSpecifiedBenchmarks and passing it a custom reporter object. +// The reporter object must implement the following interface. +class BenchmarkReporter { + public: + struct Context { + CPUInfo const& cpu_info; + SystemInfo const& sys_info; + // The number of chars in the longest benchmark name. 
+ size_t name_field_width; + static const char* executable_name; + Context(); + }; + + struct Run { + static const int64_t no_repetition_index = -1; + enum RunType { RT_Iteration, RT_Aggregate }; + + Run() + : run_type(RT_Iteration), + error_occurred(false), + iterations(1), + threads(1), + time_unit(kNanosecond), + real_accumulated_time(0), + cpu_accumulated_time(0), + max_heapbytes_used(0), + complexity(oNone), + complexity_lambda(), + complexity_n(0), + report_big_o(false), + report_rms(false), + counters(), + has_memory_result(false), + allocs_per_iter(0.0), + max_bytes_used(0) {} + + std::string benchmark_name() const; + BenchmarkName run_name; + RunType run_type; + std::string aggregate_name; + std::string report_label; // Empty if not set by benchmark. + bool error_occurred; + std::string error_message; + + IterationCount iterations; + int64_t threads; + int64_t repetition_index; + int64_t repetitions; + TimeUnit time_unit; + double real_accumulated_time; + double cpu_accumulated_time; + + // Return a value representing the real time per iteration in the unit + // specified by 'time_unit'. + // NOTE: If 'iterations' is zero the returned value represents the + // accumulated time. + double GetAdjustedRealTime() const; + + // Return a value representing the cpu time per iteration in the unit + // specified by 'time_unit'. + // NOTE: If 'iterations' is zero the returned value represents the + // accumulated time. + double GetAdjustedCPUTime() const; + + // This is set to 0.0 if memory tracing is not enabled. + double max_heapbytes_used; + + // Keep track of arguments to compute asymptotic complexity + BigO complexity; + BigOFunc* complexity_lambda; + int64_t complexity_n; + + // what statistics to compute from the measurements + const std::vector* statistics; + + // Inform print function whether the current run is a complexity report + bool report_big_o; + bool report_rms; + + UserCounters counters; + + // Memory metrics. + bool has_memory_result; + double allocs_per_iter; + int64_t max_bytes_used; + }; + + // Construct a BenchmarkReporter with the output stream set to 'std::cout' + // and the error stream set to 'std::cerr' + BenchmarkReporter(); + + // Called once for every suite of benchmarks run. + // The parameter "context" contains information that the + // reporter may wish to use when generating its report, for example the + // platform under which the benchmarks are running. The benchmark run is + // never started if this function returns false, allowing the reporter + // to skip runs based on the context information. + virtual bool ReportContext(const Context& context) = 0; + + // Called once for each group of benchmark runs, gives information about + // cpu-time and heap memory usage during the benchmark run. If the group + // of runs contained more than two entries then 'report' contains additional + // elements representing the mean and standard deviation of those runs. + // Additionally if this group of runs was the last in a family of benchmarks + // 'reports' contains additional entries representing the asymptotic + // complexity and RMS of that benchmark family. + virtual void ReportRuns(const std::vector& report) = 0; + + // Called once and only once after ever group of benchmarks is run and + // reported. + virtual void Finalize() {} + + // REQUIRES: The object referenced by 'out' is valid for the lifetime + // of the reporter. 
+ void SetOutputStream(std::ostream* out) { + assert(out); + output_stream_ = out; + } + + // REQUIRES: The object referenced by 'err' is valid for the lifetime + // of the reporter. + void SetErrorStream(std::ostream* err) { + assert(err); + error_stream_ = err; + } + + std::ostream& GetOutputStream() const { return *output_stream_; } + + std::ostream& GetErrorStream() const { return *error_stream_; } + + virtual ~BenchmarkReporter(); + + // Write a human readable string to 'out' representing the specified + // 'context'. + // REQUIRES: 'out' is non-null. + static void PrintBasicContext(std::ostream* out, Context const& context); + + private: + std::ostream* output_stream_; + std::ostream* error_stream_; +}; + +// Simple reporter that outputs benchmark data to the console. This is the +// default reporter used by RunSpecifiedBenchmarks(). +class ConsoleReporter : public BenchmarkReporter { + public: + enum OutputOptions { + OO_None = 0, + OO_Color = 1, + OO_Tabular = 2, + OO_ColorTabular = OO_Color | OO_Tabular, + OO_Defaults = OO_ColorTabular + }; + explicit ConsoleReporter(OutputOptions opts_ = OO_Defaults) + : output_options_(opts_), + name_field_width_(0), + prev_counters_(), + printed_header_(false) {} + + virtual bool ReportContext(const Context& context); + virtual void ReportRuns(const std::vector& reports); + + protected: + virtual void PrintRunData(const Run& report); + virtual void PrintHeader(const Run& report); + + OutputOptions output_options_; + size_t name_field_width_; + UserCounters prev_counters_; + bool printed_header_; +}; + +class JSONReporter : public BenchmarkReporter { + public: + JSONReporter() : first_report_(true) {} + virtual bool ReportContext(const Context& context); + virtual void ReportRuns(const std::vector& reports); + virtual void Finalize(); + + private: + void PrintRunData(const Run& report); + + bool first_report_; +}; + +class BENCHMARK_DEPRECATED_MSG( + "The CSV Reporter will be removed in a future release") CSVReporter + : public BenchmarkReporter { + public: + CSVReporter() : printed_header_(false) {} + virtual bool ReportContext(const Context& context); + virtual void ReportRuns(const std::vector& reports); + + private: + void PrintRunData(const Run& report); + + bool printed_header_; + std::set user_counter_names_; +}; + +// If a MemoryManager is registered, it can be used to collect and report +// allocation metrics for a run of the benchmark. +class MemoryManager { + public: + struct Result { + Result() : num_allocs(0), max_bytes_used(0) {} + + // The number of allocations made in total between Start and Stop. + int64_t num_allocs; + + // The peak memory use between Start and Stop. + int64_t max_bytes_used; + }; + + virtual ~MemoryManager() {} + + // Implement this to start recording allocation information. + virtual void Start() = 0; + + // Implement this to stop recording and fill out the given Result structure. 
+ virtual void Stop(Result* result) = 0; +}; + +inline const char* GetTimeUnitString(TimeUnit unit) { + switch (unit) { + case kMillisecond: + return "ms"; + case kMicrosecond: + return "us"; + case kNanosecond: + return "ns"; + } + BENCHMARK_UNREACHABLE(); +} + +inline double GetTimeUnitMultiplier(TimeUnit unit) { + switch (unit) { + case kMillisecond: + return 1e3; + case kMicrosecond: + return 1e6; + case kNanosecond: + return 1e9; + } + BENCHMARK_UNREACHABLE(); +} + +} // namespace benchmark + +#endif // BENCHMARK_BENCHMARK_H_ diff --git a/benchmarks/thirdparty/benchmark/setup.py b/benchmarks/thirdparty/benchmark/setup.py new file mode 100755 index 0000000000..5cdab10cf7 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/setup.py @@ -0,0 +1,140 @@ +import os +import posixpath +import re +import shutil +import sys + +from distutils import sysconfig +import setuptools +from setuptools.command import build_ext + + +HERE = os.path.dirname(os.path.abspath(__file__)) + + +IS_WINDOWS = sys.platform.startswith("win") + + +def _get_version(): + """Parse the version string from __init__.py.""" + with open( + os.path.join(HERE, "bindings", "python", "google_benchmark", "__init__.py") + ) as init_file: + try: + version_line = next( + line for line in init_file if line.startswith("__version__") + ) + except StopIteration: + raise ValueError("__version__ not defined in __init__.py") + else: + namespace = {} + exec(version_line, namespace) # pylint: disable=exec-used + return namespace["__version__"] + + +def _parse_requirements(path): + with open(os.path.join(HERE, path)) as requirements: + return [ + line.rstrip() + for line in requirements + if not (line.isspace() or line.startswith("#")) + ] + + +class BazelExtension(setuptools.Extension): + """A C/C++ extension that is defined as a Bazel BUILD target.""" + + def __init__(self, name, bazel_target): + self.bazel_target = bazel_target + self.relpath, self.target_name = posixpath.relpath(bazel_target, "//").split( + ":" + ) + setuptools.Extension.__init__(self, name, sources=[]) + + +class BuildBazelExtension(build_ext.build_ext): + """A command that runs Bazel to build a C/C++ extension.""" + + def run(self): + for ext in self.extensions: + self.bazel_build(ext) + build_ext.build_ext.run(self) + + def bazel_build(self, ext): + """Runs the bazel build to create the package.""" + with open("WORKSPACE", "r") as workspace: + workspace_contents = workspace.read() + + with open("WORKSPACE", "w") as workspace: + workspace.write( + re.sub( + r'(?<=path = ").*(?=", # May be overwritten by setup\.py\.)', + sysconfig.get_python_inc().replace(os.path.sep, posixpath.sep), + workspace_contents, + ) + ) + + if not os.path.exists(self.build_temp): + os.makedirs(self.build_temp) + + bazel_argv = [ + "bazel", + "build", + ext.bazel_target, + "--symlink_prefix=" + os.path.join(self.build_temp, "bazel-"), + "--compilation_mode=" + ("dbg" if self.debug else "opt"), + ] + + if IS_WINDOWS: + # Link with python*.lib. 
+ for library_dir in self.library_dirs: + bazel_argv.append("--linkopt=/LIBPATH:" + library_dir) + + self.spawn(bazel_argv) + + shared_lib_suffix = '.dll' if IS_WINDOWS else '.so' + ext_bazel_bin_path = os.path.join( + self.build_temp, 'bazel-bin', + ext.relpath, ext.target_name + shared_lib_suffix) + + ext_dest_path = self.get_ext_fullpath(ext.name) + ext_dest_dir = os.path.dirname(ext_dest_path) + if not os.path.exists(ext_dest_dir): + os.makedirs(ext_dest_dir) + shutil.copyfile(ext_bazel_bin_path, ext_dest_path) + + +setuptools.setup( + name="google_benchmark", + version=_get_version(), + url="https://github.com/google/benchmark", + description="A library to benchmark code snippets.", + author="Google", + author_email="benchmark-py@google.com", + # Contained modules and scripts. + package_dir={"": "bindings/python"}, + packages=setuptools.find_packages("bindings/python"), + install_requires=_parse_requirements("bindings/python/requirements.txt"), + cmdclass=dict(build_ext=BuildBazelExtension), + ext_modules=[ + BazelExtension( + "google_benchmark._benchmark", + "//bindings/python/google_benchmark:_benchmark", + ) + ], + zip_safe=False, + # PyPI package information. + classifiers=[ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Topic :: Software Development :: Testing", + "Topic :: System :: Benchmark", + ], + license="Apache 2.0", + keywords="benchmark", +) diff --git a/benchmarks/thirdparty/benchmark/src/CMakeLists.txt b/benchmarks/thirdparty/benchmark/src/CMakeLists.txt new file mode 100755 index 0000000000..35d559eeae --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/CMakeLists.txt @@ -0,0 +1,114 @@ +# Allow the source files to find headers in src/ +include(GNUInstallDirs) +include_directories(${PROJECT_SOURCE_DIR}/src) + +if (DEFINED BENCHMARK_CXX_LINKER_FLAGS) + list(APPEND CMAKE_SHARED_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) + list(APPEND CMAKE_MODULE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) +endif() + +file(GLOB + SOURCE_FILES + *.cc + ${PROJECT_SOURCE_DIR}/include/benchmark/*.h + ${CMAKE_CURRENT_SOURCE_DIR}/*.h) +file(GLOB BENCHMARK_MAIN "benchmark_main.cc") +foreach(item ${BENCHMARK_MAIN}) + list(REMOVE_ITEM SOURCE_FILES "${item}") +endforeach() + +add_library(benchmark ${SOURCE_FILES}) +add_library(benchmark::benchmark ALIAS benchmark) +set_target_properties(benchmark PROPERTIES + OUTPUT_NAME "benchmark" + VERSION ${GENERIC_LIB_VERSION} + SOVERSION ${GENERIC_LIB_SOVERSION} +) +target_include_directories(benchmark PUBLIC + $ + ) + +# Link threads. +target_link_libraries(benchmark ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) +find_library(LIBRT rt) +if(LIBRT) + target_link_libraries(benchmark ${LIBRT}) +endif() + +if(CMAKE_BUILD_TYPE) + string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UPPER) +endif() +if(NOT CMAKE_THREAD_LIBS_INIT AND "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}" MATCHES ".*-fsanitize=[^ ]*address.*") + message(WARNING "CMake's FindThreads.cmake did not fail, but CMAKE_THREAD_LIBS_INIT ended up being empty. 
This was fixed in https://github.com/Kitware/CMake/commit/d53317130e84898c5328c237186dbd995aaf1c12 Let's guess that -pthread is sufficient.")
+  target_link_libraries(benchmark -pthread)
+endif()
+
+# We need extra libraries on Windows
+if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
+  target_link_libraries(benchmark shlwapi)
+endif()
+
+# We need extra libraries on Solaris
+if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS")
+  target_link_libraries(benchmark kstat)
+endif()
+
+# Benchmark main library
+add_library(benchmark_main "benchmark_main.cc")
+add_library(benchmark::benchmark_main ALIAS benchmark_main)
+set_target_properties(benchmark_main PROPERTIES
+  OUTPUT_NAME "benchmark_main"
+  VERSION ${GENERIC_LIB_VERSION}
+  SOVERSION ${GENERIC_LIB_SOVERSION}
+)
+target_include_directories(benchmark PUBLIC
+    $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
+    )
+target_link_libraries(benchmark_main benchmark::benchmark)
+
+
+set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")
+
+set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake")
+set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake")
+set(pkg_config "${generated_dir}/${PROJECT_NAME}.pc")
+set(targets_export_name "${PROJECT_NAME}Targets")
+
+set(namespace "${PROJECT_NAME}::")
+
+include(CMakePackageConfigHelpers)
+write_basic_package_version_file(
+  "${version_config}" VERSION ${GENERIC_LIB_VERSION} COMPATIBILITY SameMajorVersion
+)
+
+configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY)
+configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark.pc.in" "${pkg_config}" @ONLY)
+
+if (BENCHMARK_ENABLE_INSTALL)
+  # Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable)
+  install(
+    TARGETS benchmark benchmark_main
+    EXPORT ${targets_export_name}
+    ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+    LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+    RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+    INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
+
+  install(
+    DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark"
+    DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
+    FILES_MATCHING PATTERN "*.*h")
+
+  install(
+    FILES "${project_config}" "${version_config}"
+    DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}")
+
+  install(
+    FILES "${pkg_config}"
+    DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
+
+  install(
+    EXPORT "${targets_export_name}"
+    NAMESPACE "${namespace}"
+    DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}")
+endif()
diff --git a/benchmarks/thirdparty/benchmark/src/arraysize.h b/benchmarks/thirdparty/benchmark/src/arraysize.h
new file mode 100755
index 0000000000..51a50f2dff
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/src/arraysize.h
@@ -0,0 +1,33 @@
+#ifndef BENCHMARK_ARRAYSIZE_H_
+#define BENCHMARK_ARRAYSIZE_H_
+
+#include "internal_macros.h"
+
+namespace benchmark {
+namespace internal {
+// The arraysize(arr) macro returns the # of elements in an array arr.
+// The expression is a compile-time constant, and therefore can be
+// used in defining new arrays, for example. If you use arraysize on
+// a pointer by mistake, you will get a compile-time error.
+//
+
+// This template function declaration is used in defining arraysize.
+// Note that the function doesn't need an implementation, as we only
+// use its type.
+template <typename T, size_t N>
+char (&ArraySizeHelper(T (&array)[N]))[N];
+
+// That gcc wants both of these prototypes seems mysterious. VC, for
+// its part, can't decide which to use (another mystery). Matching of
+// template overloads: the final frontier.
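+// Illustrative usage sketch (hypothetical names, added for orientation):
+// because arraysize expands to a sizeof expression, it is a compile-time
+// constant and rejects pointers:
+//
+//   static const int kSmallPrimes[] = {2, 3, 5, 7};
+//   char buf[arraysize(kSmallPrimes)];  // buf has exactly 4 elements
+//   // const int* p = kSmallPrimes;
+//   // arraysize(p);                    // fails to compile: p is a pointer
+//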
+#ifndef COMPILER_MSVC
+template <typename T, size_t N>
+char (&ArraySizeHelper(const T (&array)[N]))[N];
+#endif
+
+#define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array)))
+
+}  // end namespace internal
+}  // end namespace benchmark
+
+#endif  // BENCHMARK_ARRAYSIZE_H_
diff --git a/benchmarks/thirdparty/benchmark/src/benchmark.cc b/benchmarks/thirdparty/benchmark/src/benchmark.cc
new file mode 100755
index 0000000000..1c049f2884
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/src/benchmark.cc
@@ -0,0 +1,499 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+#include "benchmark_api_internal.h"
+#include "benchmark_runner.h"
+#include "internal_macros.h"
+
+#ifndef BENCHMARK_OS_WINDOWS
+#ifndef BENCHMARK_OS_FUCHSIA
+#include <sys/resource.h>
+#endif
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "check.h"
+#include "colorprint.h"
+#include "commandlineflags.h"
+#include "complexity.h"
+#include "counter.h"
+#include "internal_macros.h"
+#include "log.h"
+#include "mutex.h"
+#include "re.h"
+#include "statistics.h"
+#include "string_util.h"
+#include "thread_manager.h"
+#include "thread_timer.h"
+
+// Print a list of benchmarks. This option overrides all other options.
+DEFINE_bool(benchmark_list_tests, false);
+
+// A regular expression that specifies the set of benchmarks to execute. If
+// this flag is empty, or if this flag is the string \"all\", all benchmarks
+// linked into the binary are run.
+DEFINE_string(benchmark_filter, ".");
+
+// Minimum number of seconds we should run benchmark before results are
+// considered significant. For cpu-time based tests, this is the lower bound
+// on the total cpu time used by all threads that make up the test. For
+// real-time based tests, this is the lower bound on the elapsed time of the
+// benchmark execution, regardless of number of threads.
+DEFINE_double(benchmark_min_time, 0.5);
+
+// The number of runs of each benchmark. If greater than 1, the mean and
+// standard deviation of the runs will be reported.
+DEFINE_int32(benchmark_repetitions, 1);
+
+// Report the result of each benchmark repetition. When 'true' is specified
+// only the mean, standard deviation, and other statistics are reported for
+// repeated benchmarks. Affects all reporters.
+DEFINE_bool(benchmark_report_aggregates_only, false);
+
+// Display the result of each benchmark repetition. When 'true' is specified
+// only the mean, standard deviation, and other statistics are displayed for
+// repeated benchmarks. Unlike benchmark_report_aggregates_only, this only
+// affects the display reporter, but *NOT* the file reporter, which will still
+// contain all the output.
+DEFINE_bool(benchmark_display_aggregates_only, false);
+
+// The format to use for console output.
+// Valid values are 'console', 'json', or 'csv'.
+DEFINE_string(benchmark_format, "console"); + +// The format to use for file output. +// Valid values are 'console', 'json', or 'csv'. +DEFINE_string(benchmark_out_format, "json"); + +// The file to write additional output to. +DEFINE_string(benchmark_out, ""); + +// Whether to use colors in the output. Valid values: +// 'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use colors if +// the output is being sent to a terminal and the TERM environment variable is +// set to a terminal type that supports colors. +DEFINE_string(benchmark_color, "auto"); + +// Whether to use tabular format when printing user counters to the console. +// Valid values: 'true'/'yes'/1, 'false'/'no'/0. Defaults to false. +DEFINE_bool(benchmark_counters_tabular, false); + +// The level of verbose logging to output +DEFINE_int32(v, 0); + +namespace benchmark { + +namespace internal { + +// FIXME: wouldn't LTO mess this up? +void UseCharPointer(char const volatile*) {} + +} // namespace internal + +State::State(IterationCount max_iters, const std::vector& ranges, + int thread_i, int n_threads, internal::ThreadTimer* timer, + internal::ThreadManager* manager) + : total_iterations_(0), + batch_leftover_(0), + max_iterations(max_iters), + started_(false), + finished_(false), + error_occurred_(false), + range_(ranges), + complexity_n_(0), + counters(), + thread_index(thread_i), + threads(n_threads), + timer_(timer), + manager_(manager) { + CHECK(max_iterations != 0) << "At least one iteration must be run"; + CHECK_LT(thread_index, threads) << "thread_index must be less than threads"; + + // Note: The use of offsetof below is technically undefined until C++17 + // because State is not a standard layout type. However, all compilers + // currently provide well-defined behavior as an extension (which is + // demonstrated since constexpr evaluation must diagnose all undefined + // behavior). However, GCC and Clang also warn about this use of offsetof, + // which must be suppressed. +#if defined(__INTEL_COMPILER) +#pragma warning push +#pragma warning(disable : 1875) +#elif defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif + // Offset tests to ensure commonly accessed data is on the first cache line. + const int cache_line_size = 64; + static_assert(offsetof(State, error_occurred_) <= + (cache_line_size - sizeof(error_occurred_)), + ""); +#if defined(__INTEL_COMPILER) +#pragma warning pop +#elif defined(__GNUC__) +#pragma GCC diagnostic pop +#endif +} + +void State::PauseTiming() { + // Add in time accumulated so far + CHECK(started_ && !finished_ && !error_occurred_); + timer_->StopTimer(); +} + +void State::ResumeTiming() { + CHECK(started_ && !finished_ && !error_occurred_); + timer_->StartTimer(); +} + +void State::SkipWithError(const char* msg) { + CHECK(msg); + error_occurred_ = true; + { + MutexLock l(manager_->GetBenchmarkMutex()); + if (manager_->results.has_error_ == false) { + manager_->results.error_message_ = msg; + manager_->results.has_error_ = true; + } + } + total_iterations_ = 0; + if (timer_->running()) timer_->StopTimer(); +} + +void State::SetIterationTime(double seconds) { + timer_->SetIterationTime(seconds); +} + +void State::SetLabel(const char* label) { + MutexLock l(manager_->GetBenchmarkMutex()); + manager_->results.report_label_ = label; +} + +void State::StartKeepRunning() { + CHECK(!started_ && !finished_); + started_ = true; + total_iterations_ = error_occurred_ ? 
0 : max_iterations; + manager_->StartStopBarrier(); + if (!error_occurred_) ResumeTiming(); +} + +void State::FinishKeepRunning() { + CHECK(started_ && (!finished_ || error_occurred_)); + if (!error_occurred_) { + PauseTiming(); + } + // Total iterations has now wrapped around past 0. Fix this. + total_iterations_ = 0; + finished_ = true; + manager_->StartStopBarrier(); +} + +namespace internal { +namespace { + +void RunBenchmarks(const std::vector& benchmarks, + BenchmarkReporter* display_reporter, + BenchmarkReporter* file_reporter) { + // Note the file_reporter can be null. + CHECK(display_reporter != nullptr); + + // Determine the width of the name field using a minimum width of 10. + bool might_have_aggregates = FLAGS_benchmark_repetitions > 1; + size_t name_field_width = 10; + size_t stat_field_width = 0; + for (const BenchmarkInstance& benchmark : benchmarks) { + name_field_width = + std::max(name_field_width, benchmark.name.str().size()); + might_have_aggregates |= benchmark.repetitions > 1; + + for (const auto& Stat : *benchmark.statistics) + stat_field_width = std::max(stat_field_width, Stat.name_.size()); + } + if (might_have_aggregates) name_field_width += 1 + stat_field_width; + + // Print header here + BenchmarkReporter::Context context; + context.name_field_width = name_field_width; + + // Keep track of running times of all instances of current benchmark + std::vector complexity_reports; + + // We flush streams after invoking reporter methods that write to them. This + // ensures users get timely updates even when streams are not line-buffered. + auto flushStreams = [](BenchmarkReporter* reporter) { + if (!reporter) return; + std::flush(reporter->GetOutputStream()); + std::flush(reporter->GetErrorStream()); + }; + + if (display_reporter->ReportContext(context) && + (!file_reporter || file_reporter->ReportContext(context))) { + flushStreams(display_reporter); + flushStreams(file_reporter); + + for (const auto& benchmark : benchmarks) { + RunResults run_results = RunBenchmark(benchmark, &complexity_reports); + + auto report = [&run_results](BenchmarkReporter* reporter, + bool report_aggregates_only) { + assert(reporter); + // If there are no aggregates, do output non-aggregates. 
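+        // (In other words: with no aggregates computed, the flag below is
+        //  forced off and the per-repetition results are always printed;
+        //  with aggregates present, the reporter's own setting decides.)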
+ report_aggregates_only &= !run_results.aggregates_only.empty(); + if (!report_aggregates_only) + reporter->ReportRuns(run_results.non_aggregates); + if (!run_results.aggregates_only.empty()) + reporter->ReportRuns(run_results.aggregates_only); + }; + + report(display_reporter, run_results.display_report_aggregates_only); + if (file_reporter) + report(file_reporter, run_results.file_report_aggregates_only); + + flushStreams(display_reporter); + flushStreams(file_reporter); + } + } + display_reporter->Finalize(); + if (file_reporter) file_reporter->Finalize(); + flushStreams(display_reporter); + flushStreams(file_reporter); +} + +// Disable deprecated warnings temporarily because we need to reference +// CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations +#ifdef __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#endif + +std::unique_ptr CreateReporter( + std::string const& name, ConsoleReporter::OutputOptions output_opts) { + typedef std::unique_ptr PtrType; + if (name == "console") { + return PtrType(new ConsoleReporter(output_opts)); + } else if (name == "json") { + return PtrType(new JSONReporter); + } else if (name == "csv") { + return PtrType(new CSVReporter); + } else { + std::cerr << "Unexpected format: '" << name << "'\n"; + std::exit(1); + } +} + +#ifdef __GNUC__ +#pragma GCC diagnostic pop +#endif + +} // end namespace + +bool IsZero(double n) { + return std::abs(n) < std::numeric_limits::epsilon(); +} + +ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) { + int output_opts = ConsoleReporter::OO_Defaults; + auto is_benchmark_color = [force_no_color]() -> bool { + if (force_no_color) { + return false; + } + if (FLAGS_benchmark_color == "auto") { + return IsColorTerminal(); + } + return IsTruthyFlagValue(FLAGS_benchmark_color); + }; + if (is_benchmark_color()) { + output_opts |= ConsoleReporter::OO_Color; + } else { + output_opts &= ~ConsoleReporter::OO_Color; + } + if (FLAGS_benchmark_counters_tabular) { + output_opts |= ConsoleReporter::OO_Tabular; + } else { + output_opts &= ~ConsoleReporter::OO_Tabular; + } + return static_cast(output_opts); +} + +} // end namespace internal + +size_t RunSpecifiedBenchmarks() { + return RunSpecifiedBenchmarks(nullptr, nullptr); +} + +size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter) { + return RunSpecifiedBenchmarks(display_reporter, nullptr); +} + +size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, + BenchmarkReporter* file_reporter) { + std::string spec = FLAGS_benchmark_filter; + if (spec.empty() || spec == "all") + spec = "."; // Regexp that matches all benchmarks + + // Setup the reporters + std::ofstream output_file; + std::unique_ptr default_display_reporter; + std::unique_ptr default_file_reporter; + if (!display_reporter) { + default_display_reporter = internal::CreateReporter( + FLAGS_benchmark_format, internal::GetOutputOptions()); + display_reporter = default_display_reporter.get(); + } + auto& Out = display_reporter->GetOutputStream(); + auto& Err = display_reporter->GetErrorStream(); + + std::string const& fname = FLAGS_benchmark_out; + if (fname.empty() && file_reporter) { + Err << "A custom file reporter was provided but " + "--benchmark_out= was not specified." 
+        << std::endl;
+    std::exit(1);
+  }
+  if (!fname.empty()) {
+    output_file.open(fname);
+    if (!output_file.is_open()) {
+      Err << "invalid file name: '" << fname << "'" << std::endl;
+      std::exit(1);
+    }
+    if (!file_reporter) {
+      default_file_reporter = internal::CreateReporter(
+          FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
+      file_reporter = default_file_reporter.get();
+    }
+    file_reporter->SetOutputStream(&output_file);
+    file_reporter->SetErrorStream(&output_file);
+  }
+
+  std::vector<internal::BenchmarkInstance> benchmarks;
+  if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;
+
+  if (benchmarks.empty()) {
+    Err << "Failed to match any benchmarks against regex: " << spec << "\n";
+    return 0;
+  }
+
+  if (FLAGS_benchmark_list_tests) {
+    for (auto const& benchmark : benchmarks)
+      Out << benchmark.name.str() << "\n";
+  } else {
+    internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
+  }
+
+  return benchmarks.size();
+}
+
+void RegisterMemoryManager(MemoryManager* manager) {
+  internal::memory_manager = manager;
+}
+
+namespace internal {
+
+void PrintUsageAndExit() {
+  fprintf(stdout,
+          "benchmark"
+          " [--benchmark_list_tests={true|false}]\n"
+          " [--benchmark_filter=<regex>]\n"
+          " [--benchmark_min_time=<min_time>]\n"
+          " [--benchmark_repetitions=<num_repetitions>]\n"
+          " [--benchmark_report_aggregates_only={true|false}]\n"
+          " [--benchmark_display_aggregates_only={true|false}]\n"
+          " [--benchmark_format=<console|json|csv>]\n"
+          " [--benchmark_out=<filename>]\n"
+          " [--benchmark_out_format=<json|console|csv>]\n"
+          " [--benchmark_color={auto|true|false}]\n"
+          " [--benchmark_counters_tabular={true|false}]\n"
+          " [--v=<verbosity>]\n");
+  exit(0);
+}
+
+void ParseCommandLineFlags(int* argc, char** argv) {
+  using namespace benchmark;
+  BenchmarkReporter::Context::executable_name =
+      (argc && *argc > 0) ? argv[0] : "unknown";
+  for (int i = 1; argc && i < *argc; ++i) {
+    if (ParseBoolFlag(argv[i], "benchmark_list_tests",
+                      &FLAGS_benchmark_list_tests) ||
+        ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
+        ParseDoubleFlag(argv[i], "benchmark_min_time",
+                        &FLAGS_benchmark_min_time) ||
+        ParseInt32Flag(argv[i], "benchmark_repetitions",
+                       &FLAGS_benchmark_repetitions) ||
+        ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
+                      &FLAGS_benchmark_report_aggregates_only) ||
+        ParseBoolFlag(argv[i], "benchmark_display_aggregates_only",
+                      &FLAGS_benchmark_display_aggregates_only) ||
+        ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
+        ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
+        ParseStringFlag(argv[i], "benchmark_out_format",
+                        &FLAGS_benchmark_out_format) ||
+        ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
+        // "color_print" is the deprecated name for "benchmark_color".
+        // TODO: Remove this.
+        ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
+        ParseBoolFlag(argv[i], "benchmark_counters_tabular",
+                      &FLAGS_benchmark_counters_tabular) ||
+        ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
+      for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];
+
+      --(*argc);
+      --i;
+    } else if (IsFlag(argv[i], "help")) {
+      PrintUsageAndExit();
+    }
+  }
+  for (auto const* flag :
+       {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format})
+    if (*flag != "console" && *flag != "json" && *flag != "csv") {
+      PrintUsageAndExit();
+    }
+  if (FLAGS_benchmark_color.empty()) {
+    PrintUsageAndExit();
+  }
+}
+
+int InitializeStreams() {
+  static std::ios_base::Init init;
+  return 0;
+}
+
+} // end namespace internal
+
+void Initialize(int* argc, char** argv) {
+  internal::ParseCommandLineFlags(argc, argv);
+  internal::LogLevel() = FLAGS_v;
+}
+
+bool ReportUnrecognizedArguments(int argc, char** argv) {
+  for (int i = 1; i < argc; ++i) {
+    fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0],
+            argv[i]);
+  }
+  return argc > 1;
+}
+
+} // end namespace benchmark
diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_api_internal.cc b/benchmarks/thirdparty/benchmark/src/benchmark_api_internal.cc
new file mode 100755
index 0000000000..d468a257e3
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/src/benchmark_api_internal.cc
@@ -0,0 +1,15 @@
+#include "benchmark_api_internal.h"
+
+namespace benchmark {
+namespace internal {
+
+State BenchmarkInstance::Run(IterationCount iters, int thread_id,
+                             internal::ThreadTimer* timer,
+                             internal::ThreadManager* manager) const {
+  State st(iters, arg, thread_id, threads, timer, manager);
+  benchmark->Run(st);
+  return st;
+}
+
+} // internal
+} // benchmark
diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_api_internal.h b/benchmarks/thirdparty/benchmark/src/benchmark_api_internal.h
new file mode 100755
index 0000000000..264eff95c5
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/src/benchmark_api_internal.h
@@ -0,0 +1,53 @@
+#ifndef BENCHMARK_API_INTERNAL_H
+#define BENCHMARK_API_INTERNAL_H
+
+#include "benchmark/benchmark.h"
+#include "commandlineflags.h"
+
+#include <cmath>
+#include <iosfwd>
+#include <limits>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace benchmark {
+namespace internal {
+
+// Information kept per benchmark we may want to run
+struct BenchmarkInstance {
+  BenchmarkName name;
+  Benchmark* benchmark;
+  AggregationReportMode aggregation_report_mode;
+  std::vector<int64_t> arg;
+  TimeUnit time_unit;
+  int range_multiplier;
+  bool measure_process_cpu_time;
+  bool use_real_time;
+  bool use_manual_time;
+  BigO complexity;
+  BigOFunc* complexity_lambda;
+  UserCounters counters;
+  const std::vector<Statistics>* statistics;
+  bool last_benchmark_instance;
+  int repetitions;
+  double min_time;
+  IterationCount iterations;
+  int threads;  // Number of concurrent threads to use
+
+  State Run(IterationCount iters, int thread_id, internal::ThreadTimer* timer,
+            internal::ThreadManager* manager) const;
+};
+
+bool FindBenchmarksInternal(const std::string& re,
+                            std::vector<BenchmarkInstance>* benchmarks,
+                            std::ostream* Err);
+
+bool IsZero(double n);
+
+ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false);
+
+} // end namespace internal
+} // end namespace benchmark
+
+#endif // BENCHMARK_API_INTERNAL_H
diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_main.cc b/benchmarks/thirdparty/benchmark/src/benchmark_main.cc
new file mode 100755
index 0000000000..b3b2478314
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/src/benchmark_main.cc
@@ -0,0 +1,17 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+
+BENCHMARK_MAIN();
diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_name.cc b/benchmarks/thirdparty/benchmark/src/benchmark_name.cc
new file mode 100755
index 0000000000..2a17ebce27
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/src/benchmark_name.cc
@@ -0,0 +1,58 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <benchmark/benchmark.h>
+
+namespace benchmark {
+
+namespace {
+
+// Compute the total size of a pack of std::strings
+size_t size_impl() { return 0; }
+
+template <typename Head, typename... Tail>
+size_t size_impl(const Head& head, const Tail&... tail) {
+  return head.size() + size_impl(tail...);
+}
+
+// Join a pack of std::strings using a delimiter
+// TODO: use absl::StrJoin
+void join_impl(std::string&, char) {}
+
+template <typename Head, typename... Tail>
+void join_impl(std::string& s, const char delimiter, const Head& head,
+               const Tail&... tail) {
+  if (!s.empty() && !head.empty()) {
+    s += delimiter;
+  }
+
+  s += head;
+
+  join_impl(s, delimiter, tail...);
+}
+
+template <typename... Ts>
+std::string join(char delimiter, const Ts&... ts) {
+  std::string s;
+  s.reserve(sizeof...(Ts) + size_impl(ts...));
+  join_impl(s, delimiter, ts...);
+  return s;
+}
+} // namespace
+
+std::string BenchmarkName::str() const {
+  return join('/', function_name, args, min_time, iterations, repetitions,
+              time_type, threads);
+}
+} // namespace benchmark
diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_register.cc b/benchmarks/thirdparty/benchmark/src/benchmark_register.cc
new file mode 100755
index 0000000000..65d9944f4f
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/src/benchmark_register.cc
@@ -0,0 +1,515 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
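+
+// Illustrative usage sketch (hypothetical benchmark, for orientation): a
+// registered family such as
+//
+//   static void BM_memcpy(benchmark::State& state) { /* ... */ }
+//   BENCHMARK(BM_memcpy)->Range(8, 8 << 10)->Threads(2);
+//
+// is expanded by BenchmarkFamilies::FindBenchmarks below into one
+// BenchmarkInstance per argument/thread-count combination.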
+
+#include "benchmark_register.h"
+
+#ifndef BENCHMARK_OS_WINDOWS
+#ifndef BENCHMARK_OS_FUCHSIA
+#include <sys/resource.h>
+#endif
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#include <inttypes.h>
+
+#include "benchmark/benchmark.h"
+#include "benchmark_api_internal.h"
+#include "check.h"
+#include "commandlineflags.h"
+#include "complexity.h"
+#include "internal_macros.h"
+#include "log.h"
+#include "mutex.h"
+#include "re.h"
+#include "statistics.h"
+#include "string_util.h"
+#include "timers.h"
+
+namespace benchmark {
+
+namespace {
+// For non-dense Range, intermediate values are powers of kRangeMultiplier.
+static const int kRangeMultiplier = 8;
+// The size of a benchmark family is the number of inputs with which its
+// benchmark will be run. If this is "large" then warn the user during
+// configuration.
+static const size_t kMaxFamilySize = 100;
+} // end namespace
+
+namespace internal {
+
+//=============================================================================//
+// BenchmarkFamilies
+//=============================================================================//
+
+// Class for managing registered benchmarks. Note that each registered
+// benchmark identifies a family of related benchmarks to run.
+class BenchmarkFamilies {
+ public:
+  static BenchmarkFamilies* GetInstance();
+
+  // Registers a benchmark family and returns the index assigned to it.
+  size_t AddBenchmark(std::unique_ptr<Benchmark> family);
+
+  // Clear all registered benchmark families.
+  void ClearBenchmarks();
+
+  // Extract the list of benchmark instances that match the specified
+  // regular expression.
+  bool FindBenchmarks(std::string re,
+                      std::vector<BenchmarkInstance>* benchmarks,
+                      std::ostream* Err);
+
+ private:
+  BenchmarkFamilies() {}
+
+  std::vector<std::unique_ptr<Benchmark>> families_;
+  Mutex mutex_;
+};
+
+BenchmarkFamilies* BenchmarkFamilies::GetInstance() {
+  static BenchmarkFamilies instance;
+  return &instance;
+}
+
+size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr<Benchmark> family) {
+  MutexLock l(mutex_);
+  size_t index = families_.size();
+  families_.push_back(std::move(family));
+  return index;
+}
+
+void BenchmarkFamilies::ClearBenchmarks() {
+  MutexLock l(mutex_);
+  families_.clear();
+  families_.shrink_to_fit();
+}
+
+bool BenchmarkFamilies::FindBenchmarks(
+    std::string spec, std::vector<BenchmarkInstance>* benchmarks,
+    std::ostream* ErrStream) {
+  CHECK(ErrStream);
+  auto& Err = *ErrStream;
+  // Make regular expression out of command-line flag
+  std::string error_msg;
+  Regex re;
+  bool isNegativeFilter = false;
+  if (spec[0] == '-') {
+    spec.replace(0, 1, "");
+    isNegativeFilter = true;
+  }
+  if (!re.Init(spec, &error_msg)) {
+    Err << "Could not compile benchmark re: " << error_msg << std::endl;
+    return false;
+  }
+
+  // Special list of thread counts to use when none are specified
+  const std::vector<int> one_thread = {1};
+
+  MutexLock l(mutex_);
+  for (std::unique_ptr<Benchmark>& family : families_) {
+    // Family was deleted or benchmark doesn't match
+    if (!family) continue;
+
+    if (family->ArgsCnt() == -1) {
+      family->Args({});
+    }
+    const std::vector<int>* thread_counts =
+        (family->thread_counts_.empty()
+             ? &one_thread
+             : &static_cast<const std::vector<int>&>(family->thread_counts_));
+    const size_t family_size = family->args_.size() * thread_counts->size();
+    // The benchmark will be run with at least 'family_size' different inputs.
+    // If 'family_size' is very large warn the user.
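+    // (For example, three Args() sets combined with two thread counts give
+    //  family_size == 3 * 2 == 6 instances.)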
+ if (family_size > kMaxFamilySize) { + Err << "The number of inputs is very large. " << family->name_ + << " will be repeated at least " << family_size << " times.\n"; + } + // reserve in the special case the regex ".", since we know the final + // family size. + if (spec == ".") benchmarks->reserve(family_size); + + for (auto const& args : family->args_) { + for (int num_threads : *thread_counts) { + BenchmarkInstance instance; + instance.name.function_name = family->name_; + instance.benchmark = family.get(); + instance.aggregation_report_mode = family->aggregation_report_mode_; + instance.arg = args; + instance.time_unit = family->time_unit_; + instance.range_multiplier = family->range_multiplier_; + instance.min_time = family->min_time_; + instance.iterations = family->iterations_; + instance.repetitions = family->repetitions_; + instance.measure_process_cpu_time = family->measure_process_cpu_time_; + instance.use_real_time = family->use_real_time_; + instance.use_manual_time = family->use_manual_time_; + instance.complexity = family->complexity_; + instance.complexity_lambda = family->complexity_lambda_; + instance.statistics = &family->statistics_; + instance.threads = num_threads; + + // Add arguments to instance name + size_t arg_i = 0; + for (auto const& arg : args) { + if (!instance.name.args.empty()) { + instance.name.args += '/'; + } + + if (arg_i < family->arg_names_.size()) { + const auto& arg_name = family->arg_names_[arg_i]; + if (!arg_name.empty()) { + instance.name.args += StrFormat("%s:", arg_name.c_str()); + } + } + + instance.name.args += StrFormat("%" PRId64, arg); + ++arg_i; + } + + if (!IsZero(family->min_time_)) + instance.name.min_time = + StrFormat("min_time:%0.3f", family->min_time_); + if (family->iterations_ != 0) { + instance.name.iterations = + StrFormat("iterations:%lu", + static_cast(family->iterations_)); + } + if (family->repetitions_ != 0) + instance.name.repetitions = + StrFormat("repeats:%d", family->repetitions_); + + if (family->measure_process_cpu_time_) { + instance.name.time_type = "process_time"; + } + + if (family->use_manual_time_) { + if (!instance.name.time_type.empty()) { + instance.name.time_type += '/'; + } + instance.name.time_type += "manual_time"; + } else if (family->use_real_time_) { + if (!instance.name.time_type.empty()) { + instance.name.time_type += '/'; + } + instance.name.time_type += "real_time"; + } + + // Add the number of threads used to the name + if (!family->thread_counts_.empty()) { + instance.name.threads = StrFormat("threads:%d", instance.threads); + } + + const auto full_name = instance.name.str(); + if ((re.Match(full_name) && !isNegativeFilter) || + (!re.Match(full_name) && isNegativeFilter)) { + instance.last_benchmark_instance = (&args == &family->args_.back()); + benchmarks->push_back(std::move(instance)); + } + } + } + } + return true; +} + +Benchmark* RegisterBenchmarkInternal(Benchmark* bench) { + std::unique_ptr bench_ptr(bench); + BenchmarkFamilies* families = BenchmarkFamilies::GetInstance(); + families->AddBenchmark(std::move(bench_ptr)); + return bench; +} + +// FIXME: This function is a hack so that benchmark.cc can access +// `BenchmarkFamilies` +bool FindBenchmarksInternal(const std::string& re, + std::vector* benchmarks, + std::ostream* Err) { + return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err); +} + +//=============================================================================// +// Benchmark 
+//=============================================================================//
+
+Benchmark::Benchmark(const char* name)
+    : name_(name),
+      aggregation_report_mode_(ARM_Unspecified),
+      time_unit_(kNanosecond),
+      range_multiplier_(kRangeMultiplier),
+      min_time_(0),
+      iterations_(0),
+      repetitions_(0),
+      measure_process_cpu_time_(false),
+      use_real_time_(false),
+      use_manual_time_(false),
+      complexity_(oNone),
+      complexity_lambda_(nullptr) {
+  ComputeStatistics("mean", StatisticsMean);
+  ComputeStatistics("median", StatisticsMedian);
+  ComputeStatistics("stddev", StatisticsStdDev);
+}
+
+Benchmark::~Benchmark() {}
+
+Benchmark* Benchmark::Arg(int64_t x) {
+  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+  args_.push_back({x});
+  return this;
+}
+
+Benchmark* Benchmark::Unit(TimeUnit unit) {
+  time_unit_ = unit;
+  return this;
+}
+
+Benchmark* Benchmark::Range(int64_t start, int64_t limit) {
+  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+  std::vector<int64_t> arglist;
+  AddRange(&arglist, start, limit, range_multiplier_);
+
+  for (int64_t i : arglist) {
+    args_.push_back({i});
+  }
+  return this;
+}
+
+Benchmark* Benchmark::Ranges(
+    const std::vector<std::pair<int64_t, int64_t>>& ranges) {
+  CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
+  std::vector<std::vector<int64_t>> arglists(ranges.size());
+  for (std::size_t i = 0; i < ranges.size(); i++) {
+    AddRange(&arglists[i], ranges[i].first, ranges[i].second,
+             range_multiplier_);
+  }
+
+  ArgsProduct(arglists);
+
+  return this;
+}
+
+Benchmark* Benchmark::ArgsProduct(
+    const std::vector<std::vector<int64_t>>& arglists) {
+  CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(arglists.size()));
+
+  std::vector<std::size_t> indices(arglists.size());
+  const std::size_t total = std::accumulate(
+      std::begin(arglists), std::end(arglists), std::size_t{1},
+      [](const std::size_t res, const std::vector<int64_t>& arglist) {
+        return res * arglist.size();
+      });
+  std::vector<int64_t> args;
+  args.reserve(arglists.size());
+  for (std::size_t i = 0; i < total; i++) {
+    for (std::size_t arg = 0; arg < arglists.size(); arg++) {
+      args.push_back(arglists[arg][indices[arg]]);
+    }
+    args_.push_back(args);
+    args.clear();
+
+    std::size_t arg = 0;
+    do {
+      indices[arg] = (indices[arg] + 1) % arglists[arg].size();
+    } while (indices[arg++] == 0 && arg < arglists.size());
+  }
+
+  return this;
+}
+
+Benchmark* Benchmark::ArgName(const std::string& name) {
+  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+  arg_names_ = {name};
+  return this;
+}
+
+Benchmark* Benchmark::ArgNames(const std::vector<std::string>& names) {
+  CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(names.size()));
+  arg_names_ = names;
+  return this;
+}
+
+Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) {
+  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+  CHECK_LE(start, limit);
+  for (int64_t arg = start; arg <= limit; arg += step) {
+    args_.push_back({arg});
+  }
+  return this;
+}
+
+Benchmark* Benchmark::Args(const std::vector<int64_t>& args) {
+  CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
+  args_.push_back(args);
+  return this;
+}
+
+Benchmark* Benchmark::Apply(void (*custom_arguments)(Benchmark* benchmark)) {
+  custom_arguments(this);
+  return this;
+}
+
+Benchmark* Benchmark::RangeMultiplier(int multiplier) {
+  CHECK(multiplier > 1);
+  range_multiplier_ = multiplier;
+  return this;
+}
+
+Benchmark* Benchmark::MinTime(double t) {
+  CHECK(t > 0.0);
+  CHECK(iterations_ == 0);
+  min_time_ = t;
+  return this;
+}
+
+Benchmark* Benchmark::Iterations(IterationCount n) {
+  CHECK(n > 0);
+  CHECK(IsZero(min_time_));
+  iterations_ = n;
+  return this;
+}
+
+Benchmark*
Benchmark::Repetitions(int n) { + CHECK(n > 0); + repetitions_ = n; + return this; +} + +Benchmark* Benchmark::ReportAggregatesOnly(bool value) { + aggregation_report_mode_ = value ? ARM_ReportAggregatesOnly : ARM_Default; + return this; +} + +Benchmark* Benchmark::DisplayAggregatesOnly(bool value) { + // If we were called, the report mode is no longer 'unspecified', in any case. + aggregation_report_mode_ = static_cast( + aggregation_report_mode_ | ARM_Default); + + if (value) { + aggregation_report_mode_ = static_cast( + aggregation_report_mode_ | ARM_DisplayReportAggregatesOnly); + } else { + aggregation_report_mode_ = static_cast( + aggregation_report_mode_ & ~ARM_DisplayReportAggregatesOnly); + } + + return this; +} + +Benchmark* Benchmark::MeasureProcessCPUTime() { + // Can be used together with UseRealTime() / UseManualTime(). + measure_process_cpu_time_ = true; + return this; +} + +Benchmark* Benchmark::UseRealTime() { + CHECK(!use_manual_time_) + << "Cannot set UseRealTime and UseManualTime simultaneously."; + use_real_time_ = true; + return this; +} + +Benchmark* Benchmark::UseManualTime() { + CHECK(!use_real_time_) + << "Cannot set UseRealTime and UseManualTime simultaneously."; + use_manual_time_ = true; + return this; +} + +Benchmark* Benchmark::Complexity(BigO complexity) { + complexity_ = complexity; + return this; +} + +Benchmark* Benchmark::Complexity(BigOFunc* complexity) { + complexity_lambda_ = complexity; + complexity_ = oLambda; + return this; +} + +Benchmark* Benchmark::ComputeStatistics(std::string name, + StatisticsFunc* statistics) { + statistics_.emplace_back(name, statistics); + return this; +} + +Benchmark* Benchmark::Threads(int t) { + CHECK_GT(t, 0); + thread_counts_.push_back(t); + return this; +} + +Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) { + CHECK_GT(min_threads, 0); + CHECK_GE(max_threads, min_threads); + + AddRange(&thread_counts_, min_threads, max_threads, 2); + return this; +} + +Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads, + int stride) { + CHECK_GT(min_threads, 0); + CHECK_GE(max_threads, min_threads); + CHECK_GE(stride, 1); + + for (auto i = min_threads; i < max_threads; i += stride) { + thread_counts_.push_back(i); + } + thread_counts_.push_back(max_threads); + return this; +} + +Benchmark* Benchmark::ThreadPerCpu() { + thread_counts_.push_back(CPUInfo::Get().num_cpus); + return this; +} + +void Benchmark::SetName(const char* name) { name_ = name; } + +int Benchmark::ArgsCnt() const { + if (args_.empty()) { + if (arg_names_.empty()) return -1; + return static_cast(arg_names_.size()); + } + return static_cast(args_.front().size()); +} + +//=============================================================================// +// FunctionBenchmark +//=============================================================================// + +void FunctionBenchmark::Run(State& st) { func_(st); } + +} // end namespace internal + +void ClearRegisteredBenchmarks() { + internal::BenchmarkFamilies::GetInstance()->ClearBenchmarks(); +} + +} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_register.h b/benchmarks/thirdparty/benchmark/src/benchmark_register.h new file mode 100755 index 0000000000..61377d7423 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/benchmark_register.h @@ -0,0 +1,107 @@ +#ifndef BENCHMARK_REGISTER_H +#define BENCHMARK_REGISTER_H + +#include + +#include "check.h" + +namespace benchmark { +namespace internal { + +// Append the powers of 'mult' in the 
closed interval [lo, hi].
+// Returns iterator to the start of the inserted range.
+template <typename T>
+typename std::vector<T>::iterator
+AddPowers(std::vector<T>* dst, T lo, T hi, int mult) {
+  CHECK_GE(lo, 0);
+  CHECK_GE(hi, lo);
+  CHECK_GE(mult, 2);
+
+  const size_t start_offset = dst->size();
+
+  static const T kmax = std::numeric_limits<T>::max();
+
+  // Space out the values in multiples of "mult"
+  for (T i = 1; i <= hi; i *= mult) {
+    if (i >= lo) {
+      dst->push_back(i);
+    }
+    // Break the loop here since multiplying by
+    // 'mult' would move outside of the range of T
+    if (i > kmax / mult) break;
+  }
+
+  return dst->begin() + start_offset;
+}
+
+template <typename T>
+void AddNegatedPowers(std::vector<T>* dst, T lo, T hi, int mult) {
+  // We negate lo and hi so we require that they cannot be equal to 'min'.
+  CHECK_GT(lo, std::numeric_limits<T>::min());
+  CHECK_GT(hi, std::numeric_limits<T>::min());
+  CHECK_GE(hi, lo);
+  CHECK_LE(hi, 0);
+
+  // Add positive powers, then negate and reverse.
+  // Casts necessary since small integers get promoted
+  // to 'int' when negating.
+  const auto lo_complement = static_cast<T>(-lo);
+  const auto hi_complement = static_cast<T>(-hi);
+
+  const auto it = AddPowers(dst, hi_complement, lo_complement, mult);
+
+  std::for_each(it, dst->end(), [](T& t) { t *= -1; });
+  std::reverse(it, dst->end());
+}
+
+template <typename T>
+void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
+  static_assert(std::is_integral<T>::value && std::is_signed<T>::value,
+                "Args type must be a signed integer");
+
+  CHECK_GE(hi, lo);
+  CHECK_GE(mult, 2);
+
+  // Add "lo"
+  dst->push_back(lo);
+
+  // Handle lo == hi as a special case, so we then know
+  // lo < hi and so it is safe to add 1 to lo and subtract 1
+  // from hi without falling outside of the range of T.
+  if (lo == hi) return;
+
+  // Ensure that lo_inner <= hi_inner below.
+  if (lo + 1 == hi) {
+    dst->push_back(hi);
+    return;
+  }
+
+  // Add all powers of 'mult' in the range [lo+1, hi-1] (inclusive).
+  const auto lo_inner = static_cast<T>(lo + 1);
+  const auto hi_inner = static_cast<T>(hi - 1);
+
+  // Insert negative values
+  if (lo_inner < 0) {
+    AddNegatedPowers(dst, lo_inner, std::min(hi_inner, T{-1}), mult);
+  }
+
+  // Treat 0 as a special case (see discussion on #762).
+  if (lo <= 0 && hi >= 0) {
+    dst->push_back(0);
+  }
+
+  // Insert positive values
+  if (hi_inner > 0) {
+    AddPowers(dst, std::max(lo_inner, T{1}), hi_inner, mult);
+  }
+
+  // Add "hi" (if different from last value).
+  if (hi != dst->back()) {
+    dst->push_back(hi);
+  }
+}
+
+} // namespace internal
+} // namespace benchmark
+
+#endif // BENCHMARK_REGISTER_H
diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_runner.cc b/benchmarks/thirdparty/benchmark/src/benchmark_runner.cc
new file mode 100755
index 0000000000..7bc6b6329e
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/src/benchmark_runner.cc
@@ -0,0 +1,362 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
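+
+// Overview (an illustrative summary of the control flow below): for each
+// BenchmarkInstance the runner repeats a grow-and-measure loop, roughly
+//
+//   iters = 1;
+//   for (;;) {
+//     results = DoNIterations();
+//     if (ShouldReportIterationResults(results)) break;
+//     iters = PredictNumItersNeeded(results);
+//   }
+//
+// so short benchmarks are rerun with ever larger iteration counts until a
+// pass lasts at least --benchmark_min_time (or an error/iteration cap hits).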
+ +#include "benchmark_runner.h" +#include "benchmark/benchmark.h" +#include "benchmark_api_internal.h" +#include "internal_macros.h" + +#ifndef BENCHMARK_OS_WINDOWS +#ifndef BENCHMARK_OS_FUCHSIA +#include +#endif +#include +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "check.h" +#include "colorprint.h" +#include "commandlineflags.h" +#include "complexity.h" +#include "counter.h" +#include "internal_macros.h" +#include "log.h" +#include "mutex.h" +#include "re.h" +#include "statistics.h" +#include "string_util.h" +#include "thread_manager.h" +#include "thread_timer.h" + +namespace benchmark { + +namespace internal { + +MemoryManager* memory_manager = nullptr; + +namespace { + +static constexpr IterationCount kMaxIterations = 1000000000; + +BenchmarkReporter::Run CreateRunReport( + const benchmark::internal::BenchmarkInstance& b, + const internal::ThreadManager::Result& results, + IterationCount memory_iterations, + const MemoryManager::Result& memory_result, double seconds, + int64_t repetition_index) { + // Create report about this benchmark run. + BenchmarkReporter::Run report; + + report.run_name = b.name; + report.error_occurred = results.has_error_; + report.error_message = results.error_message_; + report.report_label = results.report_label_; + // This is the total iterations across all threads. + report.iterations = results.iterations; + report.time_unit = b.time_unit; + report.threads = b.threads; + report.repetition_index = repetition_index; + report.repetitions = b.repetitions; + + if (!report.error_occurred) { + if (b.use_manual_time) { + report.real_accumulated_time = results.manual_time_used; + } else { + report.real_accumulated_time = results.real_time_used; + } + report.cpu_accumulated_time = results.cpu_time_used; + report.complexity_n = results.complexity_n; + report.complexity = b.complexity; + report.complexity_lambda = b.complexity_lambda; + report.statistics = b.statistics; + report.counters = results.counters; + + if (memory_iterations > 0) { + report.has_memory_result = true; + report.allocs_per_iter = + memory_iterations ? static_cast(memory_result.num_allocs) / + memory_iterations + : 0; + report.max_bytes_used = memory_result.max_bytes_used; + } + + internal::Finish(&report.counters, results.iterations, seconds, b.threads); + } + return report; +} + +// Execute one thread of benchmark b for the specified number of iterations. +// Adds the stats collected for the thread into *total. +void RunInThread(const BenchmarkInstance* b, IterationCount iters, + int thread_id, ThreadManager* manager) { + internal::ThreadTimer timer( + b->measure_process_cpu_time + ? 
internal::ThreadTimer::CreateProcessCpuTime()
+          : internal::ThreadTimer::Create());
+  State st = b->Run(iters, thread_id, &timer, manager);
+  CHECK(st.error_occurred() || st.iterations() >= st.max_iterations)
+      << "Benchmark returned before State::KeepRunning() returned false!";
+  {
+    MutexLock l(manager->GetBenchmarkMutex());
+    internal::ThreadManager::Result& results = manager->results;
+    results.iterations += st.iterations();
+    results.cpu_time_used += timer.cpu_time_used();
+    results.real_time_used += timer.real_time_used();
+    results.manual_time_used += timer.manual_time_used();
+    results.complexity_n += st.complexity_length_n();
+    internal::Increment(&results.counters, st.counters);
+  }
+  manager->NotifyThreadComplete();
+}
+
+class BenchmarkRunner {
+ public:
+  BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
+                  std::vector<BenchmarkReporter::Run>* complexity_reports_)
+      : b(b_),
+        complexity_reports(*complexity_reports_),
+        min_time(!IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time),
+        repeats(b.repetitions != 0 ? b.repetitions
+                                   : FLAGS_benchmark_repetitions),
+        has_explicit_iteration_count(b.iterations != 0),
+        pool(b.threads - 1),
+        iters(has_explicit_iteration_count ? b.iterations : 1) {
+    run_results.display_report_aggregates_only =
+        (FLAGS_benchmark_report_aggregates_only ||
+         FLAGS_benchmark_display_aggregates_only);
+    run_results.file_report_aggregates_only =
+        FLAGS_benchmark_report_aggregates_only;
+    if (b.aggregation_report_mode != internal::ARM_Unspecified) {
+      run_results.display_report_aggregates_only =
+          (b.aggregation_report_mode &
+           internal::ARM_DisplayReportAggregatesOnly);
+      run_results.file_report_aggregates_only =
+          (b.aggregation_report_mode & internal::ARM_FileReportAggregatesOnly);
+    }
+
+    for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
+      DoOneRepetition(repetition_num);
+    }
+
+    // Calculate additional statistics
+    run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
+
+    // Maybe calculate complexity report
+    if ((b.complexity != oNone) && b.last_benchmark_instance) {
+      auto additional_run_stats = ComputeBigO(complexity_reports);
+      run_results.aggregates_only.insert(run_results.aggregates_only.end(),
+                                         additional_run_stats.begin(),
+                                         additional_run_stats.end());
+      complexity_reports.clear();
+    }
+  }
+
+  RunResults&& get_results() { return std::move(run_results); }
+
+ private:
+  RunResults run_results;
+
+  const benchmark::internal::BenchmarkInstance& b;
+  std::vector<BenchmarkReporter::Run>& complexity_reports;
+
+  const double min_time;
+  const int repeats;
+  const bool has_explicit_iteration_count;
+
+  std::vector<std::thread> pool;
+
+  IterationCount iters;  // preserved between repetitions!
+  // So only the first repetition has to find/calculate it,
+  // the other repetitions will just use that precomputed iteration count.
+
+  struct IterationResults {
+    internal::ThreadManager::Result results;
+    IterationCount iters;
+    double seconds;
+  };
+  IterationResults DoNIterations() {
+    VLOG(2) << "Running " << b.name.str() << " for " << iters << "\n";
+
+    std::unique_ptr<internal::ThreadManager> manager;
+    manager.reset(new internal::ThreadManager(b.threads));
+
+    // Run all but one thread in separate threads
+    for (std::size_t ti = 0; ti < pool.size(); ++ti) {
+      pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1),
+                             manager.get());
+    }
+    // And run one thread here directly.
+    // (If we were asked to run just one thread, we don't create new threads.)
+    // Yes, we need to do this here *after* we start the separate threads.
+    RunInThread(&b, iters, 0, manager.get());
+
+    // The main thread has finished. Now let's wait for the other threads.
+    manager->WaitForAllThreads();
+    for (std::thread& thread : pool) thread.join();
+
+    IterationResults i;
+    // Acquire the measurements/counters from the manager, UNDER THE LOCK!
+    {
+      MutexLock l(manager->GetBenchmarkMutex());
+      i.results = manager->results;
+    }
+
+    // And get rid of the manager.
+    manager.reset();
+
+    // Adjust real/manual time stats since they were reported per thread.
+    i.results.real_time_used /= b.threads;
+    i.results.manual_time_used /= b.threads;
+    // If we were measuring whole-process CPU usage, adjust the CPU time too.
+    if (b.measure_process_cpu_time) i.results.cpu_time_used /= b.threads;
+
+    VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
+            << i.results.real_time_used << "\n";
+
+    // So for how long were we running?
+    i.iters = iters;
+    // Base decisions off of real time if requested by this benchmark.
+    i.seconds = i.results.cpu_time_used;
+    if (b.use_manual_time) {
+      i.seconds = i.results.manual_time_used;
+    } else if (b.use_real_time) {
+      i.seconds = i.results.real_time_used;
+    }
+
+    return i;
+  }
+
+  IterationCount PredictNumItersNeeded(const IterationResults& i) const {
+    // See by how much the iteration count should be increased.
+    // Note: Avoid division by zero with max(seconds, 1ns).
+    double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
+    // If our last run was at least 10% of FLAGS_benchmark_min_time then we
+    // use the multiplier directly.
+    // Otherwise we use at most 10 times expansion.
+    // NOTE: When the last run was at least 10% of the min time the max
+    // expansion should be 14x.
+    bool is_significant = (i.seconds / min_time) > 0.1;
+    multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
+    if (multiplier <= 1.0) multiplier = 2.0;
+
+    // So what seems to be the sufficiently-large iteration count? Round up.
+    const IterationCount max_next_iters = static_cast<IterationCount>(
+        std::lround(std::max(multiplier * static_cast<double>(i.iters),
+                             static_cast<double>(i.iters) + 1.0)));
+    // But we do have *some* sanity limits though..
+    const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
+
+    VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
+    return next_iters;  // round up before conversion to integer.
+  }
+
+  bool ShouldReportIterationResults(const IterationResults& i) const {
+    // Determine if this run should be reported:
+    // either it has run for a sufficient amount of time,
+    // or an error was reported.
+    return i.results.has_error_ ||
+           i.iters >= kMaxIterations ||  // Too many iterations already.
+           i.seconds >= min_time ||      // The elapsed time is large enough.
+           // CPU time is specified but the elapsed real time greatly exceeds
+           // the minimum time.
+           // Note that user provided timers are exempt from this sanity check.
+           ((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time);
+  }
+
+  void DoOneRepetition(int64_t repetition_index) {
+    const bool is_the_first_repetition = repetition_index == 0;
+    IterationResults i;
+
+    // We *may* be gradually increasing the length (iteration count)
+    // of the benchmark until we decide the results are significant.
+    // And once we do, we report those last results and exit.
+    // Please do note that if there are repetitions, the iteration count
+    // is *only* calculated for the *first* repetition, and other repetitions
+    // simply use that precomputed iteration count.
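+    // Worked example (illustrative numbers): with the default
+    // --benchmark_min_time=0.5, a first pass of 1 iteration taking 1e-4 s
+    // yields multiplier = 0.5 * 1.4 / 1e-4 = 7000, but the pass covered far
+    // less than 10% of min_time, so growth is capped at 10x and the next
+    // pass runs 10 iterations, and so on until a pass is long enough.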
+    for (;;) {
+      i = DoNIterations();
+
+      // Do we consider the results to be significant?
+      // If we are doing repetitions, and the first repetition was already done,
+      // it has calculated the correct iteration time, so we have run that very
+      // iteration count just now. No need to calculate anything. Just report.
+      // Else, the normal rules apply.
+      const bool results_are_significant = !is_the_first_repetition ||
+                                           has_explicit_iteration_count ||
+                                           ShouldReportIterationResults(i);
+
+      if (results_are_significant) break;  // Good, let's report them!
+
+      // Nope, bad iteration. Let's re-estimate the hopefully-sufficient
+      // iteration count, and run the benchmark again...
+
+      iters = PredictNumItersNeeded(i);
+      assert(iters > i.iters &&
+             "if we did more iterations than we want to do the next time, "
+             "then we should have accepted the current iteration run.");
+    }
+
+    // Oh, one last thing, we need to also produce the 'memory measurements'..
+    MemoryManager::Result memory_result;
+    IterationCount memory_iterations = 0;
+    if (memory_manager != nullptr) {
+      // Only run a few iterations to reduce the impact of one-time
+      // allocations in benchmarks that are not properly managed.
+      memory_iterations = std::min<IterationCount>(16, iters);
+      memory_manager->Start();
+      std::unique_ptr<internal::ThreadManager> manager;
+      manager.reset(new internal::ThreadManager(1));
+      RunInThread(&b, memory_iterations, 0, manager.get());
+      manager->WaitForAllThreads();
+      manager.reset();
+
+      memory_manager->Stop(&memory_result);
+    }
+
+    // Ok, now actually report.
+    BenchmarkReporter::Run report =
+        CreateRunReport(b, i.results, memory_iterations, memory_result,
+                        i.seconds, repetition_index);
+
+    if (!report.error_occurred && b.complexity != oNone)
+      complexity_reports.push_back(report);
+
+    run_results.non_aggregates.push_back(report);
+  }
+};
+
+} // end namespace
+
+RunResults RunBenchmark(
+    const benchmark::internal::BenchmarkInstance& b,
+    std::vector<BenchmarkReporter::Run>* complexity_reports) {
+  internal::BenchmarkRunner r(b, complexity_reports);
+  return r.get_results();
+}
+
+} // end namespace internal
+
+} // end namespace benchmark
diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_runner.h b/benchmarks/thirdparty/benchmark/src/benchmark_runner.h
new file mode 100755
index 0000000000..96e8282a11
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/src/benchmark_runner.h
@@ -0,0 +1,51 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#ifndef BENCHMARK_RUNNER_H_ +#define BENCHMARK_RUNNER_H_ + +#include "benchmark_api_internal.h" +#include "internal_macros.h" + +DECLARE_double(benchmark_min_time); + +DECLARE_int32(benchmark_repetitions); + +DECLARE_bool(benchmark_report_aggregates_only); + +DECLARE_bool(benchmark_display_aggregates_only); + +namespace benchmark { + +namespace internal { + +extern MemoryManager* memory_manager; + +struct RunResults { + std::vector non_aggregates; + std::vector aggregates_only; + + bool display_report_aggregates_only = false; + bool file_report_aggregates_only = false; +}; + +RunResults RunBenchmark( + const benchmark::internal::BenchmarkInstance& b, + std::vector* complexity_reports); + +} // namespace internal + +} // end namespace benchmark + +#endif // BENCHMARK_RUNNER_H_ diff --git a/benchmarks/thirdparty/benchmark/src/check.h b/benchmarks/thirdparty/benchmark/src/check.h new file mode 100755 index 0000000000..f5f8253f80 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/check.h @@ -0,0 +1,82 @@ +#ifndef CHECK_H_ +#define CHECK_H_ + +#include +#include +#include + +#include "internal_macros.h" +#include "log.h" + +namespace benchmark { +namespace internal { + +typedef void(AbortHandlerT)(); + +inline AbortHandlerT*& GetAbortHandler() { + static AbortHandlerT* handler = &std::abort; + return handler; +} + +BENCHMARK_NORETURN inline void CallAbortHandler() { + GetAbortHandler()(); + std::abort(); // fallback to enforce noreturn +} + +// CheckHandler is the class constructed by failing CHECK macros. CheckHandler +// will log information about the failures and abort when it is destructed. +class CheckHandler { + public: + CheckHandler(const char* check, const char* file, const char* func, int line) + : log_(GetErrorLogInstance()) { + log_ << file << ":" << line << ": " << func << ": Check `" << check + << "' failed. "; + } + + LogType& GetLog() { return log_; } + + BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) { + log_ << std::endl; + CallAbortHandler(); + } + + CheckHandler& operator=(const CheckHandler&) = delete; + CheckHandler(const CheckHandler&) = delete; + CheckHandler() = delete; + + private: + LogType& log_; +}; + +} // end namespace internal +} // end namespace benchmark + +// The CHECK macro returns a std::ostream object that can have extra information +// written to it. +#ifndef NDEBUG +#define CHECK(b) \ + (b ? 
::benchmark::internal::GetNullLogInstance() \ + : ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \ + .GetLog()) +#else +#define CHECK(b) ::benchmark::internal::GetNullLogInstance() +#endif + +// clang-format off +// preserve whitespacing between operators for alignment +#define CHECK_EQ(a, b) CHECK((a) == (b)) +#define CHECK_NE(a, b) CHECK((a) != (b)) +#define CHECK_GE(a, b) CHECK((a) >= (b)) +#define CHECK_LE(a, b) CHECK((a) <= (b)) +#define CHECK_GT(a, b) CHECK((a) > (b)) +#define CHECK_LT(a, b) CHECK((a) < (b)) + +#define CHECK_FLOAT_EQ(a, b, eps) CHECK(std::fabs((a) - (b)) < (eps)) +#define CHECK_FLOAT_NE(a, b, eps) CHECK(std::fabs((a) - (b)) >= (eps)) +#define CHECK_FLOAT_GE(a, b, eps) CHECK((a) - (b) > -(eps)) +#define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps)) +#define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) > (eps)) +#define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) > (eps)) +//clang-format on + +#endif // CHECK_H_ diff --git a/benchmarks/thirdparty/benchmark/src/colorprint.cc b/benchmarks/thirdparty/benchmark/src/colorprint.cc new file mode 100755 index 0000000000..fff6a98818 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/colorprint.cc @@ -0,0 +1,188 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "colorprint.h" + +#include +#include +#include +#include +#include +#include + +#include "check.h" +#include "internal_macros.h" + +#ifdef BENCHMARK_OS_WINDOWS +#include +#include +#else +#include +#endif // BENCHMARK_OS_WINDOWS + +namespace benchmark { +namespace { +#ifdef BENCHMARK_OS_WINDOWS +typedef WORD PlatformColorCode; +#else +typedef const char* PlatformColorCode; +#endif + +PlatformColorCode GetPlatformColorCode(LogColor color) { +#ifdef BENCHMARK_OS_WINDOWS + switch (color) { + case COLOR_RED: + return FOREGROUND_RED; + case COLOR_GREEN: + return FOREGROUND_GREEN; + case COLOR_YELLOW: + return FOREGROUND_RED | FOREGROUND_GREEN; + case COLOR_BLUE: + return FOREGROUND_BLUE; + case COLOR_MAGENTA: + return FOREGROUND_BLUE | FOREGROUND_RED; + case COLOR_CYAN: + return FOREGROUND_BLUE | FOREGROUND_GREEN; + case COLOR_WHITE: // fall through to default + default: + return 0; + } +#else + switch (color) { + case COLOR_RED: + return "1"; + case COLOR_GREEN: + return "2"; + case COLOR_YELLOW: + return "3"; + case COLOR_BLUE: + return "4"; + case COLOR_MAGENTA: + return "5"; + case COLOR_CYAN: + return "6"; + case COLOR_WHITE: + return "7"; + default: + return nullptr; + }; +#endif +} + +} // end namespace + +std::string FormatString(const char* msg, va_list args) { + // we might need a second shot at this, so pre-emptivly make a copy + va_list args_cp; + va_copy(args_cp, args); + + std::size_t size = 256; + char local_buff[256]; + auto ret = vsnprintf(local_buff, size, msg, args_cp); + + va_end(args_cp); + + // currently there is no error handling for failure, so this is hack. 
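The surrounding FormatString() implements the classic two-pass vsnprintf
sizing idiom. A self-contained restatement of that idiom (an editorial
sketch; the name Format and buffer size are assumptions, not from the patch):

    #include <cstdarg>
    #include <cstdio>
    #include <memory>
    #include <string>

    std::string Format(const char* fmt, ...) {
      char stack_buf[256];
      va_list args;
      va_start(args, fmt);
      va_list args_copy;  // the first vsnprintf consumes the list, so copy it
      va_copy(args_copy, args);
      const int needed = vsnprintf(stack_buf, sizeof(stack_buf), fmt, args_copy);
      va_end(args_copy);
      std::string out;
      if (needed >= 0 && static_cast<std::size_t>(needed) < sizeof(stack_buf)) {
        out = stack_buf;  // the first pass already produced the full string
      } else if (needed >= 0) {
        // Truncated: allocate exactly needed+1 bytes and format again.
        std::unique_ptr<char[]> heap_buf(new char[needed + 1]);
        vsnprintf(heap_buf.get(), needed + 1, fmt, args);
        out = heap_buf.get();
      }
      va_end(args);
      return out;
    }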
+ CHECK(ret >= 0); + + if (ret == 0) // handle empty expansion + return {}; + else if (static_cast(ret) < size) + return local_buff; + else { + // we did not provide a long enough buffer on our first attempt. + size = (size_t)ret + 1; // + 1 for the null byte + std::unique_ptr buff(new char[size]); + ret = vsnprintf(buff.get(), size, msg, args); + CHECK(ret > 0 && ((size_t)ret) < size); + return buff.get(); + } +} + +std::string FormatString(const char* msg, ...) { + va_list args; + va_start(args, msg); + auto tmp = FormatString(msg, args); + va_end(args); + return tmp; +} + +void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...) { + va_list args; + va_start(args, fmt); + ColorPrintf(out, color, fmt, args); + va_end(args); +} + +void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, + va_list args) { +#ifdef BENCHMARK_OS_WINDOWS + ((void)out); // suppress unused warning + + const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE); + + // Gets the current text color. + CONSOLE_SCREEN_BUFFER_INFO buffer_info; + GetConsoleScreenBufferInfo(stdout_handle, &buffer_info); + const WORD old_color_attrs = buffer_info.wAttributes; + + // We need to flush the stream buffers into the console before each + // SetConsoleTextAttribute call lest it affect the text that is already + // printed but has not yet reached the console. + fflush(stdout); + SetConsoleTextAttribute(stdout_handle, + GetPlatformColorCode(color) | FOREGROUND_INTENSITY); + vprintf(fmt, args); + + fflush(stdout); + // Restores the text color. + SetConsoleTextAttribute(stdout_handle, old_color_attrs); +#else + const char* color_code = GetPlatformColorCode(color); + if (color_code) out << FormatString("\033[0;3%sm", color_code); + out << FormatString(fmt, args) << "\033[m"; +#endif +} + +bool IsColorTerminal() { +#if BENCHMARK_OS_WINDOWS + // On Windows the TERM variable is usually not set, but the + // console there does support colors. + return 0 != _isatty(_fileno(stdout)); +#else + // On non-Windows platforms, we rely on the TERM variable. This list of + // supported TERM values is copied from Google Test: + // . 
+  const char* const SUPPORTED_TERM_VALUES[] = {
+      "xterm",         "xterm-color",     "xterm-256color",
+      "screen",        "screen-256color", "tmux",
+      "tmux-256color", "rxvt-unicode",    "rxvt-unicode-256color",
+      "linux",         "cygwin",
+  };
+
+  const char* const term = getenv("TERM");
+
+  bool term_supports_color = false;
+  for (const char* candidate : SUPPORTED_TERM_VALUES) {
+    if (term && 0 == strcmp(term, candidate)) {
+      term_supports_color = true;
+      break;
+    }
+  }
+
+  return 0 != isatty(fileno(stdout)) && term_supports_color;
+#endif  // BENCHMARK_OS_WINDOWS
+}
+
+} // end namespace benchmark
diff --git a/benchmarks/thirdparty/benchmark/src/colorprint.h b/benchmarks/thirdparty/benchmark/src/colorprint.h
new file mode 100755
index 0000000000..9f6fab9b34
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/src/colorprint.h
@@ -0,0 +1,33 @@
+#ifndef BENCHMARK_COLORPRINT_H_
+#define BENCHMARK_COLORPRINT_H_
+
+#include <cstdarg>
+#include <iostream>
+#include <string>
+
+namespace benchmark {
+enum LogColor {
+  COLOR_DEFAULT,
+  COLOR_RED,
+  COLOR_GREEN,
+  COLOR_YELLOW,
+  COLOR_BLUE,
+  COLOR_MAGENTA,
+  COLOR_CYAN,
+  COLOR_WHITE
+};
+
+std::string FormatString(const char* msg, va_list args);
+std::string FormatString(const char* msg, ...);
+
+void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
+                 va_list args);
+void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...);
+
+// Returns true if stdout appears to be a terminal that supports colored
+// output, false otherwise.
+bool IsColorTerminal();
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_COLORPRINT_H_
diff --git a/benchmarks/thirdparty/benchmark/src/commandlineflags.cc b/benchmarks/thirdparty/benchmark/src/commandlineflags.cc
new file mode 100755
index 0000000000..0648fe3a06
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/src/commandlineflags.cc
@@ -0,0 +1,228 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "commandlineflags.h"
+
+#include <cctype>
+#include <cstdlib>
+#include <cstring>
+#include <iostream>
+#include <limits>
+
+namespace benchmark {
+namespace {
+
+// Parses 'str' for a 32-bit signed integer. If successful, writes
+// the result to *value and returns true; otherwise leaves *value
+// unchanged and returns false.
+bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) {
+  // Parses the string as a decimal integer.
+  char* end = nullptr;
+  const long long_value = strtol(str, &end, 10);  // NOLINT
+
+  // Has strtol() consumed all characters in the string?
+  if (*end != '\0') {
+    // No - an invalid character was encountered.
+    std::cerr << src_text << " is expected to be a 32-bit integer, "
+              << "but actually has value \"" << str << "\".\n";
+    return false;
+  }
+
+  // Is the parsed value in the range of an Int32?
+  const int32_t result = static_cast<int32_t>(long_value);
+  if (long_value == std::numeric_limits<long>::max() ||
+      long_value == std::numeric_limits<long>::min() ||
+      // The parsed value overflows as a long.  (strtol() returns
+      // LONG_MAX or LONG_MIN when the input overflows.)
+      result != long_value
+      // The parsed value overflows as an Int32.
+  ) {
+    std::cerr << src_text << " is expected to be a 32-bit integer, "
+              << "but actually has value \"" << str << "\", "
+              << "which overflows.\n";
+    return false;
+  }
+
+  *value = result;
+  return true;
+}
+
+// Parses 'str' for a double. If successful, writes the result to *value and
+// returns true; otherwise leaves *value unchanged and returns false.
+bool ParseDouble(const std::string& src_text, const char* str, double* value) {
+  // Parses the string as a double.
+  char* end = nullptr;
+  const double double_value = strtod(str, &end);  // NOLINT
+
+  // Has strtod() consumed all characters in the string?
+  if (*end != '\0') {
+    // No - an invalid character was encountered.
+    std::cerr << src_text << " is expected to be a double, "
+              << "but actually has value \"" << str << "\".\n";
+    return false;
+  }
+
+  *value = double_value;
+  return true;
+}
+
+// Returns the name of the environment variable corresponding to the
+// given flag. For example, FlagToEnvVar("benchmark_foo") will return
+// "BENCHMARK_FOO"; the flag names already carry the "benchmark_" prefix,
+// so no extra prefix is added here.
+static std::string FlagToEnvVar(const char* flag) {
+  const std::string flag_str(flag);
+
+  std::string env_var;
+  for (size_t i = 0; i != flag_str.length(); ++i)
+    env_var += static_cast<char>(::toupper(flag_str.c_str()[i]));
+
+  return env_var;
+}
+
+} // namespace
+
+bool BoolFromEnv(const char* flag, bool default_val) {
+  const std::string env_var = FlagToEnvVar(flag);
+  const char* const value_str = getenv(env_var.c_str());
+  return value_str == nullptr ? default_val : IsTruthyFlagValue(value_str);
+}
+
+int32_t Int32FromEnv(const char* flag, int32_t default_val) {
+  const std::string env_var = FlagToEnvVar(flag);
+  const char* const value_str = getenv(env_var.c_str());
+  int32_t value = default_val;
+  if (value_str == nullptr ||
+      !ParseInt32(std::string("Environment variable ") + env_var, value_str,
+                  &value)) {
+    return default_val;
+  }
+  return value;
+}
+
+double DoubleFromEnv(const char* flag, double default_val) {
+  const std::string env_var = FlagToEnvVar(flag);
+  const char* const value_str = getenv(env_var.c_str());
+  double value = default_val;
+  if (value_str == nullptr ||
+      !ParseDouble(std::string("Environment variable ") + env_var, value_str,
+                   &value)) {
+    return default_val;
+  }
+  return value;
+}
+
+const char* StringFromEnv(const char* flag, const char* default_val) {
+  const std::string env_var = FlagToEnvVar(flag);
+  const char* const value = getenv(env_var.c_str());
+  return value == nullptr ? default_val : value;
+}
+
+// Parses a string as a command line flag. The string should have
+// the format "--flag=value". When def_optional is true, the "=value"
+// part can be omitted.
+//
+// Returns the value of the flag, or nullptr if the parsing failed.
+const char* ParseFlagValue(const char* str, const char* flag,
+                           bool def_optional) {
+  // str and flag must not be nullptr.
+  if (str == nullptr || flag == nullptr) return nullptr;
+
+  // The flag must start with "--".
+  const std::string flag_str = std::string("--") + std::string(flag);
+  const size_t flag_len = flag_str.length();
+  if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr;
+
+  // Skips the flag name.
+  const char* flag_end = str + flag_len;
+
+  // When def_optional is true, it's OK to not have a "=value" part.
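A worked example of this contract (editorial illustration; the flag name is
one of the library's real flags):

    ParseFlagValue("--benchmark_list_tests",      "benchmark_list_tests", true)  -> ""      (flag present, value omitted)
    ParseFlagValue("--benchmark_list_tests=true", "benchmark_list_tests", true)  -> "true"
    ParseFlagValue("--benchmark_list_tests",      "benchmark_list_tests", false) -> nullptr (the "=value" part is required)
    ParseFlagValue("--other_flag=1",              "benchmark_list_tests", true)  -> nullptr (flag name does not match)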
+ if (def_optional && (flag_end[0] == '\0')) return flag_end; + + // If def_optional is true and there are more characters after the + // flag name, or if def_optional is false, there must be a '=' after + // the flag name. + if (flag_end[0] != '=') return nullptr; + + // Returns the string after "=". + return flag_end + 1; +} + +bool ParseBoolFlag(const char* str, const char* flag, bool* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, true); + + // Aborts if the parsing failed. + if (value_str == nullptr) return false; + + // Converts the string value to a bool. + *value = IsTruthyFlagValue(value_str); + return true; +} + +bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == nullptr) return false; + + // Sets *value to the value of the flag. + return ParseInt32(std::string("The value of flag --") + flag, value_str, + value); +} + +bool ParseDoubleFlag(const char* str, const char* flag, double* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == nullptr) return false; + + // Sets *value to the value of the flag. + return ParseDouble(std::string("The value of flag --") + flag, value_str, + value); +} + +bool ParseStringFlag(const char* str, const char* flag, std::string* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == nullptr) return false; + + *value = value_str; + return true; +} + +bool IsFlag(const char* str, const char* flag) { + return (ParseFlagValue(str, flag, true) != nullptr); +} + +bool IsTruthyFlagValue(const std::string& value) { + if (value.size() == 1) { + char v = value[0]; + return isalnum(v) && + !(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N'); + } else if (!value.empty()) { + std::string value_lower(value); + std::transform(value_lower.begin(), value_lower.end(), value_lower.begin(), + [](char c) { return static_cast(::tolower(c)); }); + return !(value_lower == "false" || value_lower == "no" || + value_lower == "off"); + } else + return true; +} + +} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/commandlineflags.h b/benchmarks/thirdparty/benchmark/src/commandlineflags.h new file mode 100755 index 0000000000..3a1f6a8dbc --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/commandlineflags.h @@ -0,0 +1,103 @@ +#ifndef BENCHMARK_COMMANDLINEFLAGS_H_ +#define BENCHMARK_COMMANDLINEFLAGS_H_ + +#include +#include + +// Macro for referencing flags. +#define FLAG(name) FLAGS_##name + +// Macros for declaring flags. +#define DECLARE_bool(name) extern bool FLAG(name) +#define DECLARE_int32(name) extern int32_t FLAG(name) +#define DECLARE_double(name) extern double FLAG(name) +#define DECLARE_string(name) extern std::string FLAG(name) + +// Macros for defining flags. 
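A usage sketch for the macros defined just below (the flag name benchmark_foo
is hypothetical, for illustration only): a flag defined via DEFINE_double
takes its default from the matching environment variable at static
initialization, before any command-line parsing happens.

    DEFINE_double(benchmark_foo, 0.25);  // default from BENCHMARK_FOO, else 0.25

    double ReadFoo() {
      return FLAG(benchmark_foo);  // expands to FLAGS_benchmark_foo
    }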
+#define DEFINE_bool(name, default_val) \
+  bool FLAG(name) =                    \
+      benchmark::BoolFromEnv(#name, default_val)
+#define DEFINE_int32(name, default_val) \
+  int32_t FLAG(name) =                  \
+      benchmark::Int32FromEnv(#name, default_val)
+#define DEFINE_double(name, default_val) \
+  double FLAG(name) =                    \
+      benchmark::DoubleFromEnv(#name, default_val)
+#define DEFINE_string(name, default_val) \
+  std::string FLAG(name) =               \
+      benchmark::StringFromEnv(#name, default_val)
+
+namespace benchmark {
+
+// Parses a bool from the environment variable
+// corresponding to the given flag.
+//
+// If the variable exists, returns the IsTruthyFlagValue() value; if not,
+// returns the given default value.
+bool BoolFromEnv(const char* flag, bool default_val);
+
+// Parses an Int32 from the environment variable
+// corresponding to the given flag.
+//
+// If the variable exists, returns the ParseInt32() value; if not, returns
+// the given default value.
+int32_t Int32FromEnv(const char* flag, int32_t default_val);
+
+// Parses a Double from the environment variable
+// corresponding to the given flag.
+//
+// If the variable exists, returns the ParseDouble() value; if not, returns
+// the given default value.
+double DoubleFromEnv(const char* flag, double default_val);
+
+// Parses a string from the environment variable
+// corresponding to the given flag.
+//
+// If the variable exists, returns its value; if not, returns
+// the given default value.
+const char* StringFromEnv(const char* flag, const char* default_val);
+
+// Parses a string for a bool flag, in the form of either
+// "--flag=value" or "--flag".
+//
+// In the former case, the value is taken as true if it passes
+// IsTruthyFlagValue().
+//
+// In the latter case, the value is taken as true.
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseBoolFlag(const char* str, const char* flag, bool* value);
+
+// Parses a string for an Int32 flag, in the form of
+// "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseInt32Flag(const char* str, const char* flag, int32_t* value);
+
+// Parses a string for a Double flag, in the form of
+// "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
bool ParseDoubleFlag(const char* str, const char* flag, double* value);
+
+// Parses a string for a string flag, in the form of
+// "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseStringFlag(const char* str, const char* flag, std::string* value);
+
+// Returns true if the string matches the flag.
+bool IsFlag(const char* str, const char* flag);
+
+// Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or
+// some non-alphanumeric character. Also returns false if the value matches
+// one of 'no', 'false', 'off' (case-insensitive). As a special case, also
+// returns true if value is the empty string.
+bool IsTruthyFlagValue(const std::string& value); + +} // end namespace benchmark + +#endif // BENCHMARK_COMMANDLINEFLAGS_H_ diff --git a/benchmarks/thirdparty/benchmark/src/complexity.cc b/benchmarks/thirdparty/benchmark/src/complexity.cc new file mode 100755 index 0000000000..aeed67f0c7 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/complexity.cc @@ -0,0 +1,238 @@ +// Copyright 2016 Ismael Jimenez Martinez. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Source project : https://github.com/ismaelJimenez/cpp.leastsq +// Adapted to be used with google benchmark + +#include "benchmark/benchmark.h" + +#include +#include +#include "check.h" +#include "complexity.h" + +namespace benchmark { + +// Internal function to calculate the different scalability forms +BigOFunc* FittingCurve(BigO complexity) { + static const double kLog2E = 1.44269504088896340736; + switch (complexity) { + case oN: + return [](IterationCount n) -> double { return static_cast(n); }; + case oNSquared: + return [](IterationCount n) -> double { return std::pow(n, 2); }; + case oNCubed: + return [](IterationCount n) -> double { return std::pow(n, 3); }; + case oLogN: + /* Note: can't use log2 because Android's GNU STL lacks it */ + return + [](IterationCount n) { return kLog2E * log(static_cast(n)); }; + case oNLogN: + /* Note: can't use log2 because Android's GNU STL lacks it */ + return [](IterationCount n) { + return kLog2E * n * log(static_cast(n)); + }; + case o1: + default: + return [](IterationCount) { return 1.0; }; + } +} + +// Function to return an string for the calculated complexity +std::string GetBigOString(BigO complexity) { + switch (complexity) { + case oN: + return "N"; + case oNSquared: + return "N^2"; + case oNCubed: + return "N^3"; + case oLogN: + return "lgN"; + case oNLogN: + return "NlgN"; + case o1: + return "(1)"; + default: + return "f(N)"; + } +} + +// Find the coefficient for the high-order term in the running time, by +// minimizing the sum of squares of relative error, for the fitting curve +// given by the lambda expression. +// - n : Vector containing the size of the benchmark tests. +// - time : Vector containing the times for the benchmark tests. +// - fitting_curve : lambda expression (e.g. [](int64_t n) {return n; };). + +// For a deeper explanation on the algorithm logic, please refer to +// https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics + +LeastSq MinimalLeastSq(const std::vector& n, + const std::vector& time, + BigOFunc* fitting_curve) { + double sigma_gn = 0.0; + double sigma_gn_squared = 0.0; + double sigma_time = 0.0; + double sigma_time_gn = 0.0; + + // Calculate least square fitting parameter + for (size_t i = 0; i < n.size(); ++i) { + double gn_i = fitting_curve(n[i]); + sigma_gn += gn_i; + sigma_gn_squared += gn_i * gn_i; + sigma_time += time[i]; + sigma_time_gn += time[i] * gn_i; + } + + LeastSq result; + result.complexity = oLambda; + + // Calculate complexity. 
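A one-step derivation of why the coefficient computed below is the
least-squares solution (editorial note; g denotes the fitting curve and
t_i the measured times accumulated in the loop above):

\[
E(c) = \sum_i \bigl(t_i - c\,g(n_i)\bigr)^2, \qquad
\frac{dE}{dc} = -2 \sum_i g(n_i)\bigl(t_i - c\,g(n_i)\bigr) = 0
\;\Rightarrow\;
c = \frac{\sum_i t_i\,g(n_i)}{\sum_i g(n_i)^2}
  = \frac{\texttt{sigma\_time\_gn}}{\texttt{sigma\_gn\_squared}}.
\]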
+  result.coef = sigma_time_gn / sigma_gn_squared;
+
+  // Calculate RMS
+  double rms = 0.0;
+  for (size_t i = 0; i < n.size(); ++i) {
+    double fit = result.coef * fitting_curve(n[i]);
+    rms += pow((time[i] - fit), 2);
+  }
+
+  // Normalized RMS by the mean of the observed values
+  double mean = sigma_time / n.size();
+  result.rms = sqrt(rms / n.size()) / mean;
+
+  return result;
+}
+
+// Find the coefficient for the high-order term in the running time, by
+// minimizing the sum of squares of relative error.
+// - n          : Vector containing the size of the benchmark tests.
+// - time       : Vector containing the times for the benchmark tests.
+// - complexity : If different than oAuto, the fitting curve will stick to
+//                this one. If it is oAuto, the best-fitting curve will be
+//                computed.
+LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
+                       const std::vector<double>& time,
+                       const BigO complexity) {
+  CHECK_EQ(n.size(), time.size());
+  CHECK_GE(n.size(), 2);  // Do not compute a fitting curve if fewer than two
+                          // benchmark runs are given
+  CHECK_NE(complexity, oNone);
+
+  LeastSq best_fit;
+
+  if (complexity == oAuto) {
+    std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed};
+
+    // Take o1 as default best fitting curve
+    best_fit = MinimalLeastSq(n, time, FittingCurve(o1));
+    best_fit.complexity = o1;
+
+    // Compute all possible fitting curves and stick to the best one
+    for (const auto& fit : fit_curves) {
+      LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit));
+      if (current_fit.rms < best_fit.rms) {
+        best_fit = current_fit;
+        best_fit.complexity = fit;
+      }
+    }
+  } else {
+    best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
+    best_fit.complexity = complexity;
+  }
+
+  return best_fit;
+}
+
+std::vector<BenchmarkReporter::Run> ComputeBigO(
+    const std::vector<BenchmarkReporter::Run>& reports) {
+  typedef BenchmarkReporter::Run Run;
+  std::vector<Run> results;
+
+  if (reports.size() < 2) return results;
+
+  // Accumulators.
+  std::vector<int64_t> n;
+  std::vector<double> real_time;
+  std::vector<double> cpu_time;
+
+  // Populate the accumulators.
+  for (const Run& run : reports) {
+    CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?";
+    n.push_back(run.complexity_n);
+    real_time.push_back(run.real_accumulated_time / run.iterations);
+    cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
+  }
+
+  LeastSq result_cpu;
+  LeastSq result_real;
+
+  if (reports[0].complexity == oLambda) {
+    result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
+    result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
+  } else {
+    result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
+    result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
+  }
+
+  // Drop the 'args' when reporting complexity.
+  auto run_name = reports[0].run_name;
+  run_name.args.clear();
+
+  // Get the data from the accumulator to BenchmarkReporter::Run's.
+  Run big_o;
+  big_o.run_name = run_name;
+  big_o.run_type = BenchmarkReporter::Run::RT_Aggregate;
+  big_o.repetitions = reports[0].repetitions;
+  big_o.repetition_index = Run::no_repetition_index;
+  big_o.threads = reports[0].threads;
+  big_o.aggregate_name = "BigO";
+  big_o.report_label = reports[0].report_label;
+  big_o.iterations = 0;
+  big_o.real_accumulated_time = result_real.coef;
+  big_o.cpu_accumulated_time = result_cpu.coef;
+  big_o.report_big_o = true;
+  big_o.complexity = result_cpu.complexity;
+
+  // All the time results are reported after being multiplied by the
+  // time unit multiplier.
But since RMS is a relative quantity it + // should not be multiplied at all. So, here, we _divide_ it by the + // multiplier so that when it is multiplied later the result is the + // correct one. + double multiplier = GetTimeUnitMultiplier(reports[0].time_unit); + + // Only add label to mean/stddev if it is same for all runs + Run rms; + rms.run_name = run_name; + rms.run_type = BenchmarkReporter::Run::RT_Aggregate; + rms.aggregate_name = "RMS"; + rms.report_label = big_o.report_label; + rms.iterations = 0; + rms.repetition_index = Run::no_repetition_index; + rms.repetitions = reports[0].repetitions; + rms.threads = reports[0].threads; + rms.real_accumulated_time = result_real.rms / multiplier; + rms.cpu_accumulated_time = result_cpu.rms / multiplier; + rms.report_rms = true; + rms.complexity = result_cpu.complexity; + // don't forget to keep the time unit, or we won't be able to + // recover the correct value. + rms.time_unit = reports[0].time_unit; + + results.push_back(big_o); + results.push_back(rms); + return results; +} + +} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/complexity.h b/benchmarks/thirdparty/benchmark/src/complexity.h new file mode 100755 index 0000000000..df29b48d29 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/complexity.h @@ -0,0 +1,55 @@ +// Copyright 2016 Ismael Jimenez Martinez. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Source project : https://github.com/ismaelJimenez/cpp.leastsq +// Adapted to be used with google benchmark + +#ifndef COMPLEXITY_H_ +#define COMPLEXITY_H_ + +#include +#include + +#include "benchmark/benchmark.h" + +namespace benchmark { + +// Return a vector containing the bigO and RMS information for the specified +// list of reports. If 'reports.size() < 2' an empty vector is returned. +std::vector ComputeBigO( + const std::vector& reports); + +// This data structure will contain the result returned by MinimalLeastSq +// - coef : Estimated coeficient for the high-order term as +// interpolated from data. +// - rms : Normalized Root Mean Squared Error. +// - complexity : Scalability form (e.g. oN, oNLogN). In case a scalability +// form has been provided to MinimalLeastSq this will return +// the same value. In case BigO::oAuto has been selected, this +// parameter will return the best fitting curve detected. + +struct LeastSq { + LeastSq() : coef(0.0), rms(0.0), complexity(oNone) {} + + double coef; + double rms; + BigO complexity; +}; + +// Function to return an string for the calculated complexity +std::string GetBigOString(BigO complexity); + +} // end namespace benchmark + +#endif // COMPLEXITY_H_ diff --git a/benchmarks/thirdparty/benchmark/src/console_reporter.cc b/benchmarks/thirdparty/benchmark/src/console_reporter.cc new file mode 100755 index 0000000000..6fd764525e --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/console_reporter.cc @@ -0,0 +1,177 @@ +// Copyright 2015 Google Inc. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "benchmark/benchmark.h" +#include "check.h" +#include "colorprint.h" +#include "commandlineflags.h" +#include "complexity.h" +#include "counter.h" +#include "internal_macros.h" +#include "string_util.h" +#include "timers.h" + +namespace benchmark { + +bool ConsoleReporter::ReportContext(const Context& context) { + name_field_width_ = context.name_field_width; + printed_header_ = false; + prev_counters_.clear(); + + PrintBasicContext(&GetErrorStream(), context); + +#ifdef BENCHMARK_OS_WINDOWS + if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) { + GetErrorStream() + << "Color printing is only supported for stdout on windows." + " Disabling color printing\n"; + output_options_ = static_cast< OutputOptions >(output_options_ & ~OO_Color); + } +#endif + + return true; +} + +void ConsoleReporter::PrintHeader(const Run& run) { + std::string str = FormatString("%-*s %13s %15s %12s", static_cast(name_field_width_), + "Benchmark", "Time", "CPU", "Iterations"); + if(!run.counters.empty()) { + if(output_options_ & OO_Tabular) { + for(auto const& c : run.counters) { + str += FormatString(" %10s", c.first.c_str()); + } + } else { + str += " UserCounters..."; + } + } + std::string line = std::string(str.length(), '-'); + GetOutputStream() << line << "\n" << str << "\n" << line << "\n"; +} + +void ConsoleReporter::ReportRuns(const std::vector& reports) { + for (const auto& run : reports) { + // print the header: + // --- if none was printed yet + bool print_header = !printed_header_; + // --- or if the format is tabular and this run + // has different fields from the prev header + print_header |= (output_options_ & OO_Tabular) && + (!internal::SameNames(run.counters, prev_counters_)); + if (print_header) { + printed_header_ = true; + prev_counters_ = run.counters; + PrintHeader(run); + } + // As an alternative to printing the headers like this, we could sort + // the benchmarks by header and then print. But this would require + // waiting for the full results before printing, or printing twice. + PrintRunData(run); + } +} + +static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt, + ...) { + va_list args; + va_start(args, fmt); + out << FormatString(fmt, args); + va_end(args); +} + + +static std::string FormatTime(double time) { + // Align decimal places... + if (time < 1.0) { + return FormatString("%10.3f", time); + } + if (time < 10.0) { + return FormatString("%10.2f", time); + } + if (time < 100.0) { + return FormatString("%10.1f", time); + } + return FormatString("%10.0f", time); +} + +void ConsoleReporter::PrintRunData(const Run& result) { + typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...); + auto& Out = GetOutputStream(); + PrinterFn* printer = (output_options_ & OO_Color) ? + (PrinterFn*)ColorPrintf : IgnoreColorPrint; + auto name_color = + (result.report_big_o || result.report_rms) ? 
COLOR_BLUE : COLOR_GREEN; + printer(Out, name_color, "%-*s ", name_field_width_, + result.benchmark_name().c_str()); + + if (result.error_occurred) { + printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'", + result.error_message.c_str()); + printer(Out, COLOR_DEFAULT, "\n"); + return; + } + + const double real_time = result.GetAdjustedRealTime(); + const double cpu_time = result.GetAdjustedCPUTime(); + const std::string real_time_str = FormatTime(real_time); + const std::string cpu_time_str = FormatTime(cpu_time); + + + if (result.report_big_o) { + std::string big_o = GetBigOString(result.complexity); + printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time, big_o.c_str(), + cpu_time, big_o.c_str()); + } else if (result.report_rms) { + printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%", + cpu_time * 100, "%"); + } else { + const char* timeLabel = GetTimeUnitString(result.time_unit); + printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(), timeLabel, + cpu_time_str.c_str(), timeLabel); + } + + if (!result.report_big_o && !result.report_rms) { + printer(Out, COLOR_CYAN, "%10lld", result.iterations); + } + + for (auto& c : result.counters) { + const std::size_t cNameLen = std::max(std::string::size_type(10), + c.first.length()); + auto const& s = HumanReadableNumber(c.second.value, c.second.oneK); + const char* unit = ""; + if (c.second.flags & Counter::kIsRate) + unit = (c.second.flags & Counter::kInvert) ? "s" : "/s"; + if (output_options_ & OO_Tabular) { + printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(), + unit); + } else { + printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), unit); + } + } + + if (!result.report_label.empty()) { + printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str()); + } + + printer(Out, COLOR_DEFAULT, "\n"); +} + +} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/counter.cc b/benchmarks/thirdparty/benchmark/src/counter.cc new file mode 100755 index 0000000000..cf5b78ee3a --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/counter.cc @@ -0,0 +1,80 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "counter.h" + +namespace benchmark { +namespace internal { + +double Finish(Counter const& c, IterationCount iterations, double cpu_time, + double num_threads) { + double v = c.value; + if (c.flags & Counter::kIsRate) { + v /= cpu_time; + } + if (c.flags & Counter::kAvgThreads) { + v /= num_threads; + } + if (c.flags & Counter::kIsIterationInvariant) { + v *= iterations; + } + if (c.flags & Counter::kAvgIterations) { + v /= iterations; + } + + if (c.flags & Counter::kInvert) { // Invert is *always* last. 
+ v = 1.0 / v; + } + return v; +} + +void Finish(UserCounters* l, IterationCount iterations, double cpu_time, + double num_threads) { + for (auto& c : *l) { + c.second.value = Finish(c.second, iterations, cpu_time, num_threads); + } +} + +void Increment(UserCounters* l, UserCounters const& r) { + // add counters present in both or just in *l + for (auto& c : *l) { + auto it = r.find(c.first); + if (it != r.end()) { + c.second.value = c.second + it->second; + } + } + // add counters present in r, but not in *l + for (auto const& tc : r) { + auto it = l->find(tc.first); + if (it == l->end()) { + (*l)[tc.first] = tc.second; + } + } +} + +bool SameNames(UserCounters const& l, UserCounters const& r) { + if (&l == &r) return true; + if (l.size() != r.size()) { + return false; + } + for (auto const& c : l) { + if (r.find(c.first) == r.end()) { + return false; + } + } + return true; +} + +} // end namespace internal +} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/counter.h b/benchmarks/thirdparty/benchmark/src/counter.h new file mode 100755 index 0000000000..1f5a58e31f --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/counter.h @@ -0,0 +1,32 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef BENCHMARK_COUNTER_H_ +#define BENCHMARK_COUNTER_H_ + +#include "benchmark/benchmark.h" + +namespace benchmark { + +// these counter-related functions are hidden to reduce API surface. +namespace internal { +void Finish(UserCounters* l, IterationCount iterations, double time, + double num_threads); +void Increment(UserCounters* l, UserCounters const& r); +bool SameNames(UserCounters const& l, UserCounters const& r); +} // end namespace internal + +} // end namespace benchmark + +#endif // BENCHMARK_COUNTER_H_ diff --git a/benchmarks/thirdparty/benchmark/src/csv_reporter.cc b/benchmarks/thirdparty/benchmark/src/csv_reporter.cc new file mode 100755 index 0000000000..af2c18fc8a --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/csv_reporter.cc @@ -0,0 +1,154 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "benchmark/benchmark.h" +#include "complexity.h" + +#include +#include +#include +#include +#include +#include + +#include "check.h" +#include "string_util.h" +#include "timers.h" + +// File format reference: http://edoceo.com/utilitas/csv-file-format. 
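The CsvEscape() helper defined just below applies RFC 4180-style quoting. A
self-contained restatement of the rule (editorial sketch; the name Escape is
not from the patch):

    #include <iostream>
    #include <string>

    static std::string Escape(const std::string& s) {
      std::string tmp;
      tmp.reserve(s.size() + 2);
      for (char c : s) {
        if (c == '"') tmp += "\"\"";  // a quote inside a field is doubled
        else tmp += c;
      }
      return '"' + tmp + '"';  // every field is wrapped in double quotes
    }

    int main() {
      std::cout << Escape("say \"hi\"") << "\n";  // prints: "say ""hi"""
    }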
+ +namespace benchmark { + +namespace { +std::vector elements = { + "name", "iterations", "real_time", "cpu_time", + "time_unit", "bytes_per_second", "items_per_second", "label", + "error_occurred", "error_message"}; +} // namespace + +std::string CsvEscape(const std::string & s) { + std::string tmp; + tmp.reserve(s.size() + 2); + for (char c : s) { + switch (c) { + case '"' : tmp += "\"\""; break; + default : tmp += c; break; + } + } + return '"' + tmp + '"'; +} + +bool CSVReporter::ReportContext(const Context& context) { + PrintBasicContext(&GetErrorStream(), context); + return true; +} + +void CSVReporter::ReportRuns(const std::vector& reports) { + std::ostream& Out = GetOutputStream(); + + if (!printed_header_) { + // save the names of all the user counters + for (const auto& run : reports) { + for (const auto& cnt : run.counters) { + if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second") + continue; + user_counter_names_.insert(cnt.first); + } + } + + // print the header + for (auto B = elements.begin(); B != elements.end();) { + Out << *B++; + if (B != elements.end()) Out << ","; + } + for (auto B = user_counter_names_.begin(); + B != user_counter_names_.end();) { + Out << ",\"" << *B++ << "\""; + } + Out << "\n"; + + printed_header_ = true; + } else { + // check that all the current counters are saved in the name set + for (const auto& run : reports) { + for (const auto& cnt : run.counters) { + if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second") + continue; + CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end()) + << "All counters must be present in each run. " + << "Counter named \"" << cnt.first + << "\" was not in a run after being added to the header"; + } + } + } + + // print results for each run + for (const auto& run : reports) { + PrintRunData(run); + } +} + +void CSVReporter::PrintRunData(const Run& run) { + std::ostream& Out = GetOutputStream(); + Out << CsvEscape(run.benchmark_name()) << ","; + if (run.error_occurred) { + Out << std::string(elements.size() - 3, ','); + Out << "true,"; + Out << CsvEscape(run.error_message) << "\n"; + return; + } + + // Do not print iteration on bigO and RMS report + if (!run.report_big_o && !run.report_rms) { + Out << run.iterations; + } + Out << ","; + + Out << run.GetAdjustedRealTime() << ","; + Out << run.GetAdjustedCPUTime() << ","; + + // Do not print timeLabel on bigO and RMS report + if (run.report_big_o) { + Out << GetBigOString(run.complexity); + } else if (!run.report_rms) { + Out << GetTimeUnitString(run.time_unit); + } + Out << ","; + + if (run.counters.find("bytes_per_second") != run.counters.end()) { + Out << run.counters.at("bytes_per_second"); + } + Out << ","; + if (run.counters.find("items_per_second") != run.counters.end()) { + Out << run.counters.at("items_per_second"); + } + Out << ","; + if (!run.report_label.empty()) { + Out << CsvEscape(run.report_label); + } + Out << ",,"; // for error_occurred and error_message + + // Print user counters + for (const auto& ucn : user_counter_names_) { + auto it = run.counters.find(ucn); + if (it == run.counters.end()) { + Out << ","; + } else { + Out << "," << it->second; + } + } + Out << '\n'; +} + +} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/cycleclock.h b/benchmarks/thirdparty/benchmark/src/cycleclock.h new file mode 100755 index 0000000000..179c67cd61 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/cycleclock.h @@ -0,0 +1,206 @@ +// 
----------------------------------------------------------------------
+// CycleClock
+//    A CycleClock tells you the current time in Cycles.  The "time"
+//    is actually time since power-on.  This is like time() but doesn't
+//    involve a system call and is much more precise.
+//
+// NOTE: Not all cpu/platform/kernel combinations guarantee that this
+// clock increments at a constant rate or is synchronized across all logical
+// cpus in a system.
+//
+// If you need the above guarantees, please consider using a different
+// API. There are efforts to provide an interface which provides a millisecond
+// granularity and is implemented as a memory read. A memory read is generally
+// cheaper than the CycleClock for many architectures.
+//
+// Also, in some out of order CPU implementations, the CycleClock is not
+// serializing. So if you're trying to count at cycles granularity, your
+// data might be inaccurate due to out of order instruction execution.
+// ----------------------------------------------------------------------
+
+#ifndef BENCHMARK_CYCLECLOCK_H_
+#define BENCHMARK_CYCLECLOCK_H_
+
+#include <cstdint>
+
+#include "benchmark/benchmark.h"
+#include "internal_macros.h"
+
+#if defined(BENCHMARK_OS_MACOSX)
+#include <mach/mach_time.h>
+#endif
+// For MSVC, we want to use '_asm rdtsc' when possible (since it works
+// with even ancient MSVC compilers), and when not possible the
+// __rdtsc intrinsic, declared in <intrin.h>. Unfortunately, in some
+// environments, <windows.h> and <intrin.h> have conflicting
+// declarations of some other intrinsics, breaking compilation.
+// Therefore, we simply declare __rdtsc ourselves. See also
+// http://connect.microsoft.com/VisualStudio/feedback/details/262047
+#if defined(COMPILER_MSVC) && !defined(_M_IX86)
+extern "C" uint64_t __rdtsc();
+#pragma intrinsic(__rdtsc)
+#endif
+
+#if !defined(BENCHMARK_OS_WINDOWS) || defined(BENCHMARK_OS_MINGW)
+#include <sys/time.h>
+#include <time.h>
+#endif
+
+#ifdef BENCHMARK_OS_EMSCRIPTEN
+#include <emscripten.h>
+#endif
+
+namespace benchmark {
+// NOTE: only i386 and x86_64 have been well tested.
+// PPC, sparc, alpha, and ia64 are based on
+//    http://peter.kuscsik.com/wordpress/?p=14
+// with modifications by m3b.  See also
+//    https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
+namespace cycleclock {
+// This should return the number of cycles since power-on.  Thread-safe.
+inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
+#if defined(BENCHMARK_OS_MACOSX)
+  // this goes at the top because we need ALL Macs, regardless of
+  // architecture, to return the number of "mach time units" that
+  // have passed since startup.  See sysinfo.cc where
+  // InitializeSystemInfo() sets the supposed cpu clock frequency of
+  // macs to the number of mach time units per second, not actual
+  // CPU clock frequency (which can change in the face of CPU
+  // frequency scaling).  Also note that when the Mac sleeps, this
+  // counter pauses; it does not continue counting, nor does it
+  // reset to zero.
+  return mach_absolute_time();
+#elif defined(BENCHMARK_OS_EMSCRIPTEN)
+  // this goes above x86-specific code because old versions of Emscripten
+  // define __x86_64__, although they have nothing to do with it.
+ return static_cast(emscripten_get_now() * 1e+6); +#elif defined(__i386__) + int64_t ret; + __asm__ volatile("rdtsc" : "=A"(ret)); + return ret; +#elif defined(__x86_64__) || defined(__amd64__) + uint64_t low, high; + __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); + return (high << 32) | low; +#elif defined(__powerpc__) || defined(__ppc__) + // This returns a time-base, which is not always precisely a cycle-count. +#if defined(__powerpc64__) || defined(__ppc64__) + int64_t tb; + asm volatile("mfspr %0, 268" : "=r"(tb)); + return tb; +#else + uint32_t tbl, tbu0, tbu1; + asm volatile( + "mftbu %0\n" + "mftbl %1\n" + "mftbu %2" + : "=r"(tbu0), "=r"(tbl), "=r"(tbu1)); + tbl &= -static_cast(tbu0 == tbu1); + // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is no longer needed) + return (static_cast(tbu1) << 32) | tbl; +#endif +#elif defined(__sparc__) + int64_t tick; + asm(".byte 0x83, 0x41, 0x00, 0x00"); + asm("mov %%g1, %0" : "=r"(tick)); + return tick; +#elif defined(__ia64__) + int64_t itc; + asm("mov %0 = ar.itc" : "=r"(itc)); + return itc; +#elif defined(COMPILER_MSVC) && defined(_M_IX86) + // Older MSVC compilers (like 7.x) don't seem to support the + // __rdtsc intrinsic properly, so I prefer to use _asm instead + // when I know it will work. Otherwise, I'll use __rdtsc and hope + // the code is being compiled with a non-ancient compiler. + _asm rdtsc +#elif defined(COMPILER_MSVC) + return __rdtsc(); +#elif defined(BENCHMARK_OS_NACL) + // Native Client validator on x86/x86-64 allows RDTSC instructions, + // and this case is handled above. Native Client validator on ARM + // rejects MRC instructions (used in the ARM-specific sequence below), + // so we handle it here. Portable Native Client compiles to + // architecture-agnostic bytecode, which doesn't provide any + // cycle counter access mnemonics. + + // Native Client does not provide any API to access cycle counter. + // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday + // because is provides nanosecond resolution (which is noticable at + // least for PNaCl modules running on x86 Mac & Linux). + // Initialize to always return 0 if clock_gettime fails. + struct timespec ts = {0, 0}; + clock_gettime(CLOCK_MONOTONIC, &ts); + return static_cast(ts.tv_sec) * 1000000000 + ts.tv_nsec; +#elif defined(__aarch64__) + // System timer of ARMv8 runs at a different frequency than the CPU's. + // The frequency is fixed, typically in the range 1-50MHz. It can be + // read at CNTFRQ special register. We assume the OS has set up + // the virtual timer properly. + int64_t virtual_timer_value; + asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value)); + return virtual_timer_value; +#elif defined(__ARM_ARCH) + // V6 is the earliest arch that has a standard cyclecount + // Native Client validator doesn't allow MRC instructions. +#if (__ARM_ARCH >= 6) + uint32_t pmccntr; + uint32_t pmuseren; + uint32_t pmcntenset; + // Read the user mode perf monitor counter access permissions. + asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren)); + if (pmuseren & 1) { // Allows reading perfmon counters for user mode code. + asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset)); + if (pmcntenset & 0x80000000ul) { // Is it counting? 
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr)); + // The counter is set up to count every 64th cycle + return static_cast(pmccntr) * 64; // Should optimize to << 6 + } + } +#endif + struct timeval tv; + gettimeofday(&tv, nullptr); + return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; +#elif defined(__mips__) + // mips apparently only allows rdtsc for superusers, so we fall + // back to gettimeofday. It's possible clock_gettime would be better. + struct timeval tv; + gettimeofday(&tv, nullptr); + return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; +#elif defined(__s390__) // Covers both s390 and s390x. + // Return the CPU clock. + uint64_t tsc; + asm("stck %0" : "=Q"(tsc) : : "cc"); + return tsc; +#elif defined(__riscv) // RISC-V + // Use RDCYCLE (and RDCYCLEH on riscv32) +#if __riscv_xlen == 32 + uint32_t cycles_lo, cycles_hi0, cycles_hi1; + // This asm also includes the PowerPC overflow handling strategy, as above. + // Implemented in assembly because Clang insisted on branching. + asm volatile( + "rdcycleh %0\n" + "rdcycle %1\n" + "rdcycleh %2\n" + "sub %0, %0, %2\n" + "seqz %0, %0\n" + "sub %0, zero, %0\n" + "and %1, %1, %0\n" + : "=r"(cycles_hi0), "=r"(cycles_lo), "=r"(cycles_hi1)); + return (static_cast(cycles_hi1) << 32) | cycles_lo; +#else + uint64_t cycles; + asm volatile("rdcycle %0" : "=r"(cycles)); + return cycles; +#endif +#else +// The soft failover to a generic implementation is automatic only for ARM. +// For other platforms the developer is expected to make an attempt to create +// a fast implementation and use generic version if nothing better is available. +#error You need to define CycleTimer for your OS and CPU +#endif +} +} // end namespace cycleclock +} // end namespace benchmark + +#endif // BENCHMARK_CYCLECLOCK_H_ diff --git a/benchmarks/thirdparty/benchmark/src/internal_macros.h b/benchmarks/thirdparty/benchmark/src/internal_macros.h new file mode 100755 index 0000000000..6adf00d056 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/internal_macros.h @@ -0,0 +1,94 @@ +#ifndef BENCHMARK_INTERNAL_MACROS_H_ +#define BENCHMARK_INTERNAL_MACROS_H_ + +#include "benchmark/benchmark.h" + +/* Needed to detect STL */ +#include + +// clang-format off + +#ifndef __has_feature +#define __has_feature(x) 0 +#endif + +#if defined(__clang__) + #if !defined(COMPILER_CLANG) + #define COMPILER_CLANG + #endif +#elif defined(_MSC_VER) + #if !defined(COMPILER_MSVC) + #define COMPILER_MSVC + #endif +#elif defined(__GNUC__) + #if !defined(COMPILER_GCC) + #define COMPILER_GCC + #endif +#endif + +#if __has_feature(cxx_attributes) + #define BENCHMARK_NORETURN [[noreturn]] +#elif defined(__GNUC__) + #define BENCHMARK_NORETURN __attribute__((noreturn)) +#elif defined(COMPILER_MSVC) + #define BENCHMARK_NORETURN __declspec(noreturn) +#else + #define BENCHMARK_NORETURN +#endif + +#if defined(__CYGWIN__) + #define BENCHMARK_OS_CYGWIN 1 +#elif defined(_WIN32) + #define BENCHMARK_OS_WINDOWS 1 + #if defined(__MINGW32__) + #define BENCHMARK_OS_MINGW 1 + #endif +#elif defined(__APPLE__) + #define BENCHMARK_OS_APPLE 1 + #include "TargetConditionals.h" + #if defined(TARGET_OS_MAC) + #define BENCHMARK_OS_MACOSX 1 + #if defined(TARGET_OS_IPHONE) + #define BENCHMARK_OS_IOS 1 + #endif + #endif +#elif defined(__FreeBSD__) + #define BENCHMARK_OS_FREEBSD 1 +#elif defined(__NetBSD__) + #define BENCHMARK_OS_NETBSD 1 +#elif defined(__OpenBSD__) + #define BENCHMARK_OS_OPENBSD 1 +#elif defined(__linux__) + #define BENCHMARK_OS_LINUX 1 +#elif defined(__native_client__) + #define BENCHMARK_OS_NACL 
1 +#elif defined(__EMSCRIPTEN__) + #define BENCHMARK_OS_EMSCRIPTEN 1 +#elif defined(__rtems__) + #define BENCHMARK_OS_RTEMS 1 +#elif defined(__Fuchsia__) +#define BENCHMARK_OS_FUCHSIA 1 +#elif defined (__SVR4) && defined (__sun) +#define BENCHMARK_OS_SOLARIS 1 +#elif defined(__QNX__) +#define BENCHMARK_OS_QNX 1 +#endif + +#if defined(__ANDROID__) && defined(__GLIBCXX__) +#define BENCHMARK_STL_ANDROID_GNUSTL 1 +#endif + +#if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \ + && !defined(__EXCEPTIONS) + #define BENCHMARK_HAS_NO_EXCEPTIONS +#endif + +#if defined(COMPILER_CLANG) || defined(COMPILER_GCC) + #define BENCHMARK_MAYBE_UNUSED __attribute__((unused)) +#else + #define BENCHMARK_MAYBE_UNUSED +#endif + +// clang-format on + +#endif // BENCHMARK_INTERNAL_MACROS_H_ diff --git a/benchmarks/thirdparty/benchmark/src/json_reporter.cc b/benchmarks/thirdparty/benchmark/src/json_reporter.cc new file mode 100755 index 0000000000..959d245a34 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/json_reporter.cc @@ -0,0 +1,255 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "benchmark/benchmark.h" +#include "complexity.h" + +#include +#include +#include +#include // for setprecision +#include +#include +#include +#include +#include + +#include "string_util.h" +#include "timers.h" + +namespace benchmark { + +namespace { + +std::string StrEscape(const std::string & s) { + std::string tmp; + tmp.reserve(s.size()); + for (char c : s) { + switch (c) { + case '\b': tmp += "\\b"; break; + case '\f': tmp += "\\f"; break; + case '\n': tmp += "\\n"; break; + case '\r': tmp += "\\r"; break; + case '\t': tmp += "\\t"; break; + case '\\': tmp += "\\\\"; break; + case '"' : tmp += "\\\""; break; + default : tmp += c; break; + } + } + return tmp; +} + +std::string FormatKV(std::string const& key, std::string const& value) { + return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str()); +} + +std::string FormatKV(std::string const& key, const char* value) { + return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str()); +} + +std::string FormatKV(std::string const& key, bool value) { + return StrFormat("\"%s\": %s", StrEscape(key).c_str(), value ? "true" : "false"); +} + +std::string FormatKV(std::string const& key, int64_t value) { + std::stringstream ss; + ss << '"' << StrEscape(key) << "\": " << value; + return ss.str(); +} + +std::string FormatKV(std::string const& key, IterationCount value) { + std::stringstream ss; + ss << '"' << StrEscape(key) << "\": " << value; + return ss.str(); +} + +std::string FormatKV(std::string const& key, double value) { + std::stringstream ss; + ss << '"' << StrEscape(key) << "\": "; + + if (std::isnan(value)) + ss << (value < 0 ? "-" : "") << "NaN"; + else if (std::isinf(value)) + ss << (value < 0 ? 
"-" : "") << "Infinity"; + else { + const auto max_digits10 = + std::numeric_limits::max_digits10; + const auto max_fractional_digits10 = max_digits10 - 1; + ss << std::scientific << std::setprecision(max_fractional_digits10) + << value; + } + return ss.str(); +} + +int64_t RoundDouble(double v) { return std::lround(v); } + +} // end namespace + +bool JSONReporter::ReportContext(const Context& context) { + std::ostream& out = GetOutputStream(); + + out << "{\n"; + std::string inner_indent(2, ' '); + + // Open context block and print context information. + out << inner_indent << "\"context\": {\n"; + std::string indent(4, ' '); + + std::string walltime_value = LocalDateTimeString(); + out << indent << FormatKV("date", walltime_value) << ",\n"; + + out << indent << FormatKV("host_name", context.sys_info.name) << ",\n"; + + if (Context::executable_name) { + out << indent << FormatKV("executable", Context::executable_name) << ",\n"; + } + + CPUInfo const& info = context.cpu_info; + out << indent << FormatKV("num_cpus", static_cast(info.num_cpus)) + << ",\n"; + out << indent + << FormatKV("mhz_per_cpu", + RoundDouble(info.cycles_per_second / 1000000.0)) + << ",\n"; + if (CPUInfo::Scaling::UNKNOWN != info.scaling) { + out << indent << FormatKV("cpu_scaling_enabled", info.scaling == CPUInfo::Scaling::ENABLED ? true : false) + << ",\n"; + } + + out << indent << "\"caches\": [\n"; + indent = std::string(6, ' '); + std::string cache_indent(8, ' '); + for (size_t i = 0; i < info.caches.size(); ++i) { + auto& CI = info.caches[i]; + out << indent << "{\n"; + out << cache_indent << FormatKV("type", CI.type) << ",\n"; + out << cache_indent << FormatKV("level", static_cast(CI.level)) + << ",\n"; + out << cache_indent + << FormatKV("size", static_cast(CI.size)) << ",\n"; + out << cache_indent + << FormatKV("num_sharing", static_cast(CI.num_sharing)) + << "\n"; + out << indent << "}"; + if (i != info.caches.size() - 1) out << ","; + out << "\n"; + } + indent = std::string(4, ' '); + out << indent << "],\n"; + out << indent << "\"load_avg\": ["; + for (auto it = info.load_avg.begin(); it != info.load_avg.end();) { + out << *it++; + if (it != info.load_avg.end()) out << ","; + } + out << "],\n"; + +#if defined(NDEBUG) + const char build_type[] = "release"; +#else + const char build_type[] = "debug"; +#endif + out << indent << FormatKV("library_build_type", build_type) << "\n"; + // Close context block and open the list of benchmarks. + out << inner_indent << "},\n"; + out << inner_indent << "\"benchmarks\": [\n"; + return true; +} + +void JSONReporter::ReportRuns(std::vector const& reports) { + if (reports.empty()) { + return; + } + std::string indent(4, ' '); + std::ostream& out = GetOutputStream(); + if (!first_report_) { + out << ",\n"; + } + first_report_ = false; + + for (auto it = reports.begin(); it != reports.end(); ++it) { + out << indent << "{\n"; + PrintRunData(*it); + out << indent << '}'; + auto it_cp = it; + if (++it_cp != reports.end()) { + out << ",\n"; + } + } +} + +void JSONReporter::Finalize() { + // Close the list of benchmarks and the top level object. 
+ GetOutputStream() << "\n ]\n}\n"; +} + +void JSONReporter::PrintRunData(Run const& run) { + std::string indent(6, ' '); + std::ostream& out = GetOutputStream(); + out << indent << FormatKV("name", run.benchmark_name()) << ",\n"; + out << indent << FormatKV("run_name", run.run_name.str()) << ",\n"; + out << indent << FormatKV("run_type", [&run]() -> const char* { + switch (run.run_type) { + case BenchmarkReporter::Run::RT_Iteration: + return "iteration"; + case BenchmarkReporter::Run::RT_Aggregate: + return "aggregate"; + } + BENCHMARK_UNREACHABLE(); + }()) << ",\n"; + out << indent << FormatKV("repetitions", run.repetitions) << ",\n"; + if (run.run_type != BenchmarkReporter::Run::RT_Aggregate) { + out << indent << FormatKV("repetition_index", run.repetition_index) + << ",\n"; + } + out << indent << FormatKV("threads", run.threads) << ",\n"; + if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) { + out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n"; + } + if (run.error_occurred) { + out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n"; + out << indent << FormatKV("error_message", run.error_message) << ",\n"; + } + if (!run.report_big_o && !run.report_rms) { + out << indent << FormatKV("iterations", run.iterations) << ",\n"; + out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n"; + out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime()); + out << ",\n" + << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); + } else if (run.report_big_o) { + out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime()) + << ",\n"; + out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime()) + << ",\n"; + out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n"; + out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); + } else if (run.report_rms) { + out << indent << FormatKV("rms", run.GetAdjustedCPUTime()); + } + + for (auto& c : run.counters) { + out << ",\n" << indent << FormatKV(c.first, c.second); + } + + if (run.has_memory_result) { + out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter); + out << ",\n" << indent << FormatKV("max_bytes_used", run.max_bytes_used); + } + + if (!run.report_label.empty()) { + out << ",\n" << indent << FormatKV("label", run.report_label); + } + out << '\n'; +} + +} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/log.h b/benchmarks/thirdparty/benchmark/src/log.h new file mode 100755 index 0000000000..47d0c35c01 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/log.h @@ -0,0 +1,74 @@ +#ifndef BENCHMARK_LOG_H_ +#define BENCHMARK_LOG_H_ + +#include +#include + +#include "benchmark/benchmark.h" + +namespace benchmark { +namespace internal { + +typedef std::basic_ostream&(EndLType)(std::basic_ostream&); + +class LogType { + friend LogType& GetNullLogInstance(); + friend LogType& GetErrorLogInstance(); + + // FIXME: Add locking to output. 
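+  // (Everything streamed into a LogType goes to either a real std::ostream
+  //  or a null sink; see GetNullLogInstance()/GetErrorLogInstance() below.)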
+  template <class Tp>
+  friend LogType& operator<<(LogType&, Tp const&);
+  friend LogType& operator<<(LogType&, EndLType*);
+
+ private:
+  LogType(std::ostream* out) : out_(out) {}
+  std::ostream* out_;
+  BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType);
+};
+
+template <class Tp>
+LogType& operator<<(LogType& log, Tp const& value) {
+  if (log.out_) {
+    *log.out_ << value;
+  }
+  return log;
+}
+
+inline LogType& operator<<(LogType& log, EndLType* m) {
+  if (log.out_) {
+    *log.out_ << m;
+  }
+  return log;
+}
+
+inline int& LogLevel() {
+  static int log_level = 0;
+  return log_level;
+}
+
+inline LogType& GetNullLogInstance() {
+  static LogType log(nullptr);
+  return log;
+}
+
+inline LogType& GetErrorLogInstance() {
+  static LogType log(&std::clog);
+  return log;
+}
+
+inline LogType& GetLogInstanceForLevel(int level) {
+  if (level <= LogLevel()) {
+    return GetErrorLogInstance();
+  }
+  return GetNullLogInstance();
+}
+
+}  // end namespace internal
+}  // end namespace benchmark
+
+// clang-format off
+#define VLOG(x)                                                               \
+  (::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \
+                                                       " ")
+// clang-format on
+#endif
diff --git a/benchmarks/thirdparty/benchmark/src/mutex.h b/benchmarks/thirdparty/benchmark/src/mutex.h
new file mode 100755
index 0000000000..3fac79aea4
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/src/mutex.h
@@ -0,0 +1,155 @@
+#ifndef BENCHMARK_MUTEX_H_
+#define BENCHMARK_MUTEX_H_
+
+#include <condition_variable>
+#include <mutex>
+
+#include "check.h"
+
+// Enable thread safety attributes only with clang.
+// The attributes can be safely erased when compiling with other compilers.
+#if defined(HAVE_THREAD_SAFETY_ATTRIBUTES)
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op
+#endif
+
+#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x))
+
+#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+
+#define ACQUIRED_BEFORE(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+
+#define ACQUIRED_AFTER(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+
+#define REQUIRES(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
+
+#define REQUIRES_SHARED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
+
+#define ACQUIRE(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
+
+#define ACQUIRE_SHARED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
+
+#define RELEASE(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
+
+#define RELEASE_SHARED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
+
+#define TRY_ACQUIRE(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
+
+#define TRY_ACQUIRE_SHARED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
+
+#define EXCLUDES(...) \
THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) + +#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x)) + +#define ASSERT_SHARED_CAPABILITY(x) \ + THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x)) + +#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) + +#define NO_THREAD_SAFETY_ANALYSIS \ + THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) + +namespace benchmark { + +typedef std::condition_variable Condition; + +// NOTE: Wrappers for std::mutex and std::unique_lock are provided so that +// we can annotate them with thread safety attributes and use the +// -Wthread-safety warning with clang. The standard library types cannot be +// used directly because they do not provide the required annotations. +class CAPABILITY("mutex") Mutex { + public: + Mutex() {} + + void lock() ACQUIRE() { mut_.lock(); } + void unlock() RELEASE() { mut_.unlock(); } + std::mutex& native_handle() { return mut_; } + + private: + std::mutex mut_; +}; + +class SCOPED_CAPABILITY MutexLock { + typedef std::unique_lock MutexLockImp; + + public: + MutexLock(Mutex& m) ACQUIRE(m) : ml_(m.native_handle()) {} + ~MutexLock() RELEASE() {} + MutexLockImp& native_handle() { return ml_; } + + private: + MutexLockImp ml_; +}; + +class Barrier { + public: + Barrier(int num_threads) : running_threads_(num_threads) {} + + // Called by each thread + bool wait() EXCLUDES(lock_) { + bool last_thread = false; + { + MutexLock ml(lock_); + last_thread = createBarrier(ml); + } + if (last_thread) phase_condition_.notify_all(); + return last_thread; + } + + void removeThread() EXCLUDES(lock_) { + MutexLock ml(lock_); + --running_threads_; + if (entered_ != 0) phase_condition_.notify_all(); + } + + private: + Mutex lock_; + Condition phase_condition_; + int running_threads_; + + // State for barrier management + int phase_number_ = 0; + int entered_ = 0; // Number of threads that have entered this barrier + + // Enter the barrier and wait until all other threads have also + // entered the barrier. Returns iff this is the last thread to + // enter the barrier. + bool createBarrier(MutexLock& ml) REQUIRES(lock_) { + CHECK_LT(entered_, running_threads_); + entered_++; + if (entered_ < running_threads_) { + // Wait for all threads to enter + int phase_number_cp = phase_number_; + auto cb = [this, phase_number_cp]() { + return this->phase_number_ > phase_number_cp || + entered_ == running_threads_; // A thread has aborted in error + }; + phase_condition_.wait(ml.native_handle(), cb); + if (phase_number_ > phase_number_cp) return false; + // else (running_threads_ == entered_) and we are the last thread. + } + // Last thread has reached the barrier + phase_number_++; + entered_ = 0; + return true; + } +}; + +} // end namespace benchmark + +#endif // BENCHMARK_MUTEX_H_ diff --git a/benchmarks/thirdparty/benchmark/src/re.h b/benchmarks/thirdparty/benchmark/src/re.h new file mode 100755 index 0000000000..fbe25037b4 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/re.h @@ -0,0 +1,158 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef BENCHMARK_RE_H_ +#define BENCHMARK_RE_H_ + +#include "internal_macros.h" + +// clang-format off + +#if !defined(HAVE_STD_REGEX) && \ + !defined(HAVE_GNU_POSIX_REGEX) && \ + !defined(HAVE_POSIX_REGEX) + // No explicit regex selection; detect based on builtin hints. + #if defined(BENCHMARK_OS_LINUX) || defined(BENCHMARK_OS_APPLE) + #define HAVE_POSIX_REGEX 1 + #elif __cplusplus >= 199711L + #define HAVE_STD_REGEX 1 + #endif +#endif + +// Prefer C regex libraries when compiling w/o exceptions so that we can +// correctly report errors. +#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && \ + defined(BENCHMARK_HAVE_STD_REGEX) && \ + (defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX)) + #undef HAVE_STD_REGEX +#endif + +#if defined(HAVE_STD_REGEX) + #include +#elif defined(HAVE_GNU_POSIX_REGEX) + #include +#elif defined(HAVE_POSIX_REGEX) + #include +#else +#error No regular expression backend was found! +#endif + +// clang-format on + +#include + +#include "check.h" + +namespace benchmark { + +// A wrapper around the POSIX regular expression API that provides automatic +// cleanup +class Regex { + public: + Regex() : init_(false) {} + + ~Regex(); + + // Compile a regular expression matcher from spec. Returns true on success. + // + // On failure (and if error is not nullptr), error is populated with a human + // readable error message if an error occurs. + bool Init(const std::string& spec, std::string* error); + + // Returns whether str matches the compiled regular expression. + bool Match(const std::string& str); + + private: + bool init_; +// Underlying regular expression object +#if defined(HAVE_STD_REGEX) + std::regex re_; +#elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX) + regex_t re_; +#else +#error No regular expression backend implementation available +#endif +}; + +#if defined(HAVE_STD_REGEX) + +inline bool Regex::Init(const std::string& spec, std::string* error) { +#ifdef BENCHMARK_HAS_NO_EXCEPTIONS + ((void)error); // suppress unused warning +#else + try { +#endif + re_ = std::regex(spec, std::regex_constants::extended); + init_ = true; +#ifndef BENCHMARK_HAS_NO_EXCEPTIONS +} +catch (const std::regex_error& e) { + if (error) { + *error = e.what(); + } +} +#endif +return init_; +} + +inline Regex::~Regex() {} + +inline bool Regex::Match(const std::string& str) { + if (!init_) { + return false; + } + return std::regex_search(str, re_); +} + +#else +inline bool Regex::Init(const std::string& spec, std::string* error) { + int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB); + if (ec != 0) { + if (error) { + size_t needed = regerror(ec, &re_, nullptr, 0); + char* errbuf = new char[needed]; + regerror(ec, &re_, errbuf, needed); + + // regerror returns the number of bytes necessary to null terminate + // the string, so we move that when assigning to error. 
+ CHECK_NE(needed, 0); + error->assign(errbuf, needed - 1); + + delete[] errbuf; + } + + return false; + } + + init_ = true; + return true; +} + +inline Regex::~Regex() { + if (init_) { + regfree(&re_); + } +} + +inline bool Regex::Match(const std::string& str) { + if (!init_) { + return false; + } + return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0; +} +#endif + +} // end namespace benchmark + +#endif // BENCHMARK_RE_H_ diff --git a/benchmarks/thirdparty/benchmark/src/reporter.cc b/benchmarks/thirdparty/benchmark/src/reporter.cc new file mode 100755 index 0000000000..337575a118 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/reporter.cc @@ -0,0 +1,105 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "benchmark/benchmark.h" +#include "timers.h" + +#include + +#include +#include +#include + +#include "check.h" +#include "string_util.h" + +namespace benchmark { + +BenchmarkReporter::BenchmarkReporter() + : output_stream_(&std::cout), error_stream_(&std::cerr) {} + +BenchmarkReporter::~BenchmarkReporter() {} + +void BenchmarkReporter::PrintBasicContext(std::ostream *out, + Context const &context) { + CHECK(out) << "cannot be null"; + auto &Out = *out; + + Out << LocalDateTimeString() << "\n"; + + if (context.executable_name) + Out << "Running " << context.executable_name << "\n"; + + const CPUInfo &info = context.cpu_info; + Out << "Run on (" << info.num_cpus << " X " + << (info.cycles_per_second / 1000000.0) << " MHz CPU " + << ((info.num_cpus > 1) ? "s" : "") << ")\n"; + if (info.caches.size() != 0) { + Out << "CPU Caches:\n"; + for (auto &CInfo : info.caches) { + Out << " L" << CInfo.level << " " << CInfo.type << " " + << (CInfo.size / 1024) << " KiB"; + if (CInfo.num_sharing != 0) + Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")"; + Out << "\n"; + } + } + if (!info.load_avg.empty()) { + Out << "Load Average: "; + for (auto It = info.load_avg.begin(); It != info.load_avg.end();) { + Out << StrFormat("%.2f", *It++); + if (It != info.load_avg.end()) Out << ", "; + } + Out << "\n"; + } + + if (CPUInfo::Scaling::ENABLED == info.scaling) { + Out << "***WARNING*** CPU scaling is enabled, the benchmark " + "real time measurements may be noisy and will incur extra " + "overhead.\n"; + } + +#ifndef NDEBUG + Out << "***WARNING*** Library was built as DEBUG. Timings may be " + "affected.\n"; +#endif +} + +// No initializer because it's already initialized to NULL. 
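+// (Objects with static storage duration are zero-initialized, so this
+//  pointer reliably starts out as nullptr.)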
+const char *BenchmarkReporter::Context::executable_name; + +BenchmarkReporter::Context::Context() + : cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get()) {} + +std::string BenchmarkReporter::Run::benchmark_name() const { + std::string name = run_name.str(); + if (run_type == RT_Aggregate) { + name += "_" + aggregate_name; + } + return name; +} + +double BenchmarkReporter::Run::GetAdjustedRealTime() const { + double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit); + if (iterations != 0) new_time /= static_cast(iterations); + return new_time; +} + +double BenchmarkReporter::Run::GetAdjustedCPUTime() const { + double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit); + if (iterations != 0) new_time /= static_cast(iterations); + return new_time; +} + +} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/sleep.cc b/benchmarks/thirdparty/benchmark/src/sleep.cc new file mode 100755 index 0000000000..1512ac90f7 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/sleep.cc @@ -0,0 +1,51 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "sleep.h" + +#include +#include +#include + +#include "internal_macros.h" + +#ifdef BENCHMARK_OS_WINDOWS +#include +#endif + +namespace benchmark { +#ifdef BENCHMARK_OS_WINDOWS +// Window's Sleep takes milliseconds argument. +void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); } +void SleepForSeconds(double seconds) { + SleepForMilliseconds(static_cast(kNumMillisPerSecond * seconds)); +} +#else // BENCHMARK_OS_WINDOWS +void SleepForMicroseconds(int microseconds) { + struct timespec sleep_time; + sleep_time.tv_sec = microseconds / kNumMicrosPerSecond; + sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro; + while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) + ; // Ignore signals and wait for the full interval to elapse. 
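+  // (On EINTR, nanosleep writes the remaining time back into sleep_time, so
+  //  each retry sleeps only for the not-yet-elapsed part of the interval.)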
+}
+
+void SleepForMilliseconds(int milliseconds) {
+  SleepForMicroseconds(milliseconds * kNumMicrosPerMilli);
+}
+
+void SleepForSeconds(double seconds) {
+  SleepForMicroseconds(static_cast<int>(seconds * kNumMicrosPerSecond));
+}
+#endif  // BENCHMARK_OS_WINDOWS
+}  // end namespace benchmark
diff --git a/benchmarks/thirdparty/benchmark/src/sleep.h b/benchmarks/thirdparty/benchmark/src/sleep.h
new file mode 100755
index 0000000000..f98551afe2
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/src/sleep.h
@@ -0,0 +1,15 @@
+#ifndef BENCHMARK_SLEEP_H_
+#define BENCHMARK_SLEEP_H_
+
+namespace benchmark {
+const int kNumMillisPerSecond = 1000;
+const int kNumMicrosPerMilli = 1000;
+const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000;
+const int kNumNanosPerMicro = 1000;
+const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond;
+
+void SleepForMilliseconds(int milliseconds);
+void SleepForSeconds(double seconds);
+}  // end namespace benchmark
+
+#endif  // BENCHMARK_SLEEP_H_
diff --git a/benchmarks/thirdparty/benchmark/src/statistics.cc b/benchmarks/thirdparty/benchmark/src/statistics.cc
new file mode 100755
index 0000000000..bd5a3d6597
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/src/statistics.cc
@@ -0,0 +1,193 @@
+// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
+// Copyright 2017 Roman Lebedev. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+
+#include <algorithm>
+#include <cmath>
+#include <string>
+#include <vector>
+#include <numeric>
+#include "check.h"
+#include "statistics.h"
+
+namespace benchmark {
+
+auto StatisticsSum = [](const std::vector<double>& v) {
+  return std::accumulate(v.begin(), v.end(), 0.0);
+};
+
+double StatisticsMean(const std::vector<double>& v) {
+  if (v.empty()) return 0.0;
+  return StatisticsSum(v) * (1.0 / v.size());
+}
+
+double StatisticsMedian(const std::vector<double>& v) {
+  if (v.size() < 3) return StatisticsMean(v);
+  std::vector<double> copy(v);
+
+  auto center = copy.begin() + v.size() / 2;
+  std::nth_element(copy.begin(), center, copy.end());
+
+  // did we have an odd number of samples?
+  // if yes, then center is the median
+  // if no, then we are looking for the average between center and the value
+  // before
+  if (v.size() % 2 == 1) return *center;
+  auto center2 = copy.begin() + v.size() / 2 - 1;
+  std::nth_element(copy.begin(), center2, copy.end());
+  return (*center + *center2) / 2.0;
+}
+
+// Return the sum of the squares of this sample set
+auto SumSquares = [](const std::vector<double>& v) {
+  return std::inner_product(v.begin(), v.end(), v.begin(), 0.0);
+};
+
+auto Sqr = [](const double dat) { return dat * dat; };
+auto Sqrt = [](const double dat) {
+  // Avoid NaN due to imprecision in the calculations
+  if (dat < 0.0) return 0.0;
+  return std::sqrt(dat);
+};
+
+double StatisticsStdDev(const std::vector<double>& v) {
+  const auto mean = StatisticsMean(v);
+  if (v.empty()) return mean;
+
+  // Sample standard deviation is undefined for n = 1
+  if (v.size() == 1) return 0.0;
+
+  const double avg_squares = SumSquares(v) * (1.0 / v.size());
+  return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
+}
+
+std::vector<BenchmarkReporter::Run> ComputeStats(
+    const std::vector<BenchmarkReporter::Run>& reports) {
+  typedef BenchmarkReporter::Run Run;
+  std::vector<Run> results;
+
+  auto error_count =
+      std::count_if(reports.begin(), reports.end(),
+                    [](Run const& run) { return run.error_occurred; });
+
+  if (reports.size() - error_count < 2) {
+    // We don't report aggregated data if there was a single run.
+    return results;
+  }
+
+  // Accumulators.
+  std::vector<double> real_accumulated_time_stat;
+  std::vector<double> cpu_accumulated_time_stat;
+
+  real_accumulated_time_stat.reserve(reports.size());
+  cpu_accumulated_time_stat.reserve(reports.size());
+
+  // All repetitions should be run with the same number of iterations so we
+  // can take this information from the first benchmark.
+  const IterationCount run_iterations = reports.front().iterations;
+  // create stats for user counters
+  struct CounterStat {
+    Counter c;
+    std::vector<double> s;
+  };
+  std::map<std::string, CounterStat> counter_stats;
+  for (Run const& r : reports) {
+    for (auto const& cnt : r.counters) {
+      auto it = counter_stats.find(cnt.first);
+      if (it == counter_stats.end()) {
+        counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
+        it = counter_stats.find(cnt.first);
+        it->second.s.reserve(reports.size());
+      } else {
+        CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
+      }
+    }
+  }
+
+  // Populate the accumulators.
+  for (Run const& run : reports) {
+    CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
+    CHECK_EQ(run_iterations, run.iterations);
+    if (run.error_occurred) continue;
+    real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
+    cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
+    // user counters
+    for (auto const& cnt : run.counters) {
+      auto it = counter_stats.find(cnt.first);
+      CHECK_NE(it, counter_stats.end());
+      it->second.s.emplace_back(cnt.second);
+    }
+  }
+
+  // Only add label if it is same for all runs
+  std::string report_label = reports[0].report_label;
+  for (std::size_t i = 1; i < reports.size(); i++) {
+    if (reports[i].report_label != report_label) {
+      report_label = "";
+      break;
+    }
+  }
+
+  const double iteration_rescale_factor =
+      double(reports.size()) / double(run_iterations);
+
+  for (const auto& Stat : *reports[0].statistics) {
+    // Get the data from the accumulator to BenchmarkReporter::Run's.
+    Run data;
+    data.run_name = reports[0].run_name;
+    data.run_type = BenchmarkReporter::Run::RT_Aggregate;
+    data.threads = reports[0].threads;
+    data.repetitions = reports[0].repetitions;
+    data.repetition_index = Run::no_repetition_index;
+    data.aggregate_name = Stat.name_;
+    data.report_label = report_label;
+
+    // It is incorrect to say that an aggregate is computed over
+    // run's iterations, because those iterations already got averaged.
+    // Similarly, if there are N repetitions with 1 iterations each,
+    // an aggregate will be computed over N measurements, not 1.
+    // Thus it is best to simply use the count of separate reports.
+    data.iterations = reports.size();
+
+    data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
+    data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
+
+    // We will divide these times by data.iterations when reporting, but the
+    // data.iterations is not necessarily the scale of these measurements,
+    // because in each repetition, these timers are summed over all the
+    // iterations. And if we want to say that the stats are over N repetitions
+    // and not M iterations, we need to multiply these by (N/M).
+    data.real_accumulated_time *= iteration_rescale_factor;
+    data.cpu_accumulated_time *= iteration_rescale_factor;
+
+    data.time_unit = reports[0].time_unit;
+
+    // user counters
+    for (auto const& kv : counter_stats) {
+      // Do *NOT* rescale the custom counters. They are already properly
+      // scaled.
+      const auto uc_stat = Stat.compute_(kv.second.s);
+      auto c = Counter(uc_stat, counter_stats[kv.first].c.flags,
+                       counter_stats[kv.first].c.oneK);
+      data.counters[kv.first] = c;
+    }
+
+    results.push_back(data);
+  }
+
+  return results;
+}
+
+}  // end namespace benchmark
diff --git a/benchmarks/thirdparty/benchmark/src/statistics.h b/benchmarks/thirdparty/benchmark/src/statistics.h
new file mode 100755
index 0000000000..7eccc85536
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/src/statistics.h
@@ -0,0 +1,37 @@
+// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
+// Copyright 2017 Roman Lebedev. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef STATISTICS_H_
+#define STATISTICS_H_
+
+#include <vector>
+
+#include "benchmark/benchmark.h"
+
+namespace benchmark {
+
+// Return a vector containing the mean, median and standard deviation information
+// (and any user-specified info) for the specified list of reports.
If 'reports' +// contains less than two non-errored runs an empty vector is returned +std::vector ComputeStats( + const std::vector& reports); + +double StatisticsMean(const std::vector& v); +double StatisticsMedian(const std::vector& v); +double StatisticsStdDev(const std::vector& v); + +} // end namespace benchmark + +#endif // STATISTICS_H_ diff --git a/benchmarks/thirdparty/benchmark/src/string_util.cc b/benchmarks/thirdparty/benchmark/src/string_util.cc new file mode 100755 index 0000000000..ac60b5588f --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/string_util.cc @@ -0,0 +1,255 @@ +#include "string_util.h" + +#include +#ifdef BENCHMARK_STL_ANDROID_GNUSTL +#include +#endif +#include +#include +#include +#include +#include + +#include "arraysize.h" + +namespace benchmark { +namespace { + +// kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta. +const char kBigSIUnits[] = "kMGTPEZY"; +// Kibi, Mebi, Gibi, Tebi, Pebi, Exbi, Zebi, Yobi. +const char kBigIECUnits[] = "KMGTPEZY"; +// milli, micro, nano, pico, femto, atto, zepto, yocto. +const char kSmallSIUnits[] = "munpfazy"; + +// We require that all three arrays have the same size. +static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits), + "SI and IEC unit arrays must be the same size"); +static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits), + "Small SI and Big SI unit arrays must be the same size"); + +static const int64_t kUnitsSize = arraysize(kBigSIUnits); + +void ToExponentAndMantissa(double val, double thresh, int precision, + double one_k, std::string* mantissa, + int64_t* exponent) { + std::stringstream mantissa_stream; + + if (val < 0) { + mantissa_stream << "-"; + val = -val; + } + + // Adjust threshold so that it never excludes things which can't be rendered + // in 'precision' digits. + const double adjusted_threshold = + std::max(thresh, 1.0 / std::pow(10.0, precision)); + const double big_threshold = adjusted_threshold * one_k; + const double small_threshold = adjusted_threshold; + // Values in ]simple_threshold,small_threshold[ will be printed as-is + const double simple_threshold = 0.01; + + if (val > big_threshold) { + // Positive powers + double scaled = val; + for (size_t i = 0; i < arraysize(kBigSIUnits); ++i) { + scaled /= one_k; + if (scaled <= big_threshold) { + mantissa_stream << scaled; + *exponent = i + 1; + *mantissa = mantissa_stream.str(); + return; + } + } + mantissa_stream << val; + *exponent = 0; + } else if (val < small_threshold) { + // Negative powers + if (val < simple_threshold) { + double scaled = val; + for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i) { + scaled *= one_k; + if (scaled >= small_threshold) { + mantissa_stream << scaled; + *exponent = -static_cast(i + 1); + *mantissa = mantissa_stream.str(); + return; + } + } + } + mantissa_stream << val; + *exponent = 0; + } else { + mantissa_stream << val; + *exponent = 0; + } + *mantissa = mantissa_stream.str(); +} + +std::string ExponentToPrefix(int64_t exponent, bool iec) { + if (exponent == 0) return ""; + + const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1); + if (index >= kUnitsSize) return ""; + + const char* array = + (exponent > 0 ? (iec ? 
kBigIECUnits : kBigSIUnits) : kSmallSIUnits);
+  if (iec)
+    return array[index] + std::string("i");
+  else
+    return std::string(1, array[index]);
+}
+
+std::string ToBinaryStringFullySpecified(double value, double threshold,
+                                         int precision,
+                                         double one_k = 1024.0) {
+  std::string mantissa;
+  int64_t exponent;
+  ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa,
+                        &exponent);
+  return mantissa + ExponentToPrefix(exponent, false);
+}
+
+}  // end namespace
+
+void AppendHumanReadable(int n, std::string* str) {
+  std::stringstream ss;
+  // Round down to the nearest SI prefix.
+  ss << ToBinaryStringFullySpecified(n, 1.0, 0);
+  *str += ss.str();
+}
+
+std::string HumanReadableNumber(double n, double one_k) {
+  // 1.1 means that figures up to 1.1k should be shown with the next unit
+  // down; this softens edge effects.
+  // 1 means that we should show one decimal place of precision.
+  return ToBinaryStringFullySpecified(n, 1.1, 1, one_k);
+}
+
+std::string StrFormatImp(const char* msg, va_list args) {
+  // we might need a second shot at this, so pre-emptively make a copy
+  va_list args_cp;
+  va_copy(args_cp, args);
+
+  // TODO(ericwf): use std::array for the first attempt to avoid one memory
+  // allocation; guess what the size might be
+  std::array<char, 256> local_buff;
+  std::size_t size = local_buff.size();
+  // 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a
+  // limitation in the android-ndk
+  auto ret = vsnprintf(local_buff.data(), size, msg, args_cp);
+
+  va_end(args_cp);
+
+  // handle empty expansion
+  if (ret == 0) return std::string{};
+  if (static_cast<std::size_t>(ret) < size)
+    return std::string(local_buff.data());
+
+  // we did not provide a long enough buffer on our first attempt.
+  // add 1 to size to account for null-byte in size cast to prevent overflow
+  size = static_cast<std::size_t>(ret) + 1;
+  auto buff_ptr = std::unique_ptr<char[]>(new char[size]);
+  // 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a
+  // limitation in the android-ndk
+  ret = vsnprintf(buff_ptr.get(), size, msg, args);
+  return std::string(buff_ptr.get());
+}
+
+std::string StrFormat(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  std::string tmp = StrFormatImp(format, args);
+  va_end(args);
+  return tmp;
+}
+
+#ifdef BENCHMARK_STL_ANDROID_GNUSTL
+/*
+ * GNU STL in Android NDK lacks support for some C++11 functions, including
+ * stoul, stoi, stod. We reimplement them here using C functions strtoul,
+ * strtol, strtod. Note that reimplemented functions are in benchmark::
+ * namespace, not std:: namespace.
+ */
+unsigned long stoul(const std::string& str, size_t* pos, int base) {
+  /* Record previous errno */
+  const int oldErrno = errno;
+  errno = 0;
+
+  const char* strStart = str.c_str();
+  char* strEnd = const_cast<char*>(strStart);
+  const unsigned long result = strtoul(strStart, &strEnd, base);
+
+  const int strtoulErrno = errno;
+  /* Restore previous errno */
+  errno = oldErrno;
+
+  /* Check for errors and return */
+  if (strtoulErrno == ERANGE) {
+    throw std::out_of_range(
+        "stoul failed: " + str + " is outside of range of unsigned long");
+  } else if (strEnd == strStart || strtoulErrno != 0) {
+    throw std::invalid_argument(
+        "stoul failed: " + str + " is not an integer");
+  }
+  if (pos != nullptr) {
+    *pos = static_cast<size_t>(strEnd - strStart);
+  }
+  return result;
+}
+
+int stoi(const std::string& str, size_t* pos, int base) {
+  /* Record previous errno */
+  const int oldErrno = errno;
+  errno = 0;
+
+  const char* strStart = str.c_str();
+  char* strEnd = const_cast<char*>(strStart);
+  const long result = strtol(strStart, &strEnd, base);
+
+  const int strtolErrno = errno;
+  /* Restore previous errno */
+  errno = oldErrno;
+
+  /* Check for errors and return */
+  if (strtolErrno == ERANGE || long(int(result)) != result) {
+    throw std::out_of_range(
+        "stoi failed: " + str + " is outside of range of int");
+  } else if (strEnd == strStart || strtolErrno != 0) {
+    throw std::invalid_argument(
+        "stoi failed: " + str + " is not an integer");
+  }
+  if (pos != nullptr) {
+    *pos = static_cast<size_t>(strEnd - strStart);
+  }
+  return int(result);
+}
+
+double stod(const std::string& str, size_t* pos) {
+  /* Record previous errno */
+  const int oldErrno = errno;
+  errno = 0;
+
+  const char* strStart = str.c_str();
+  char* strEnd = const_cast<char*>(strStart);
+  const double result = strtod(strStart, &strEnd);
+
+  const int strtodErrno = errno;
+  /* Restore previous errno */
+  errno = oldErrno;
+
+  /* Check for errors and return */
+  if (strtodErrno == ERANGE) {
+    throw std::out_of_range(
+        "stod failed: " + str + " is outside of range of double");
+  } else if (strEnd == strStart || strtodErrno != 0) {
+    throw std::invalid_argument(
+        "stod failed: " + str + " is not a number");
+  }
+  if (pos != nullptr) {
+    *pos = static_cast<size_t>(strEnd - strStart);
+  }
+  return result;
+}
+#endif
+
+}  // end namespace benchmark
diff --git a/benchmarks/thirdparty/benchmark/src/string_util.h b/benchmarks/thirdparty/benchmark/src/string_util.h
new file mode 100755
index 0000000000..09d7b4bd2a
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/src/string_util.h
@@ -0,0 +1,59 @@
+#ifndef BENCHMARK_STRING_UTIL_H_
+#define BENCHMARK_STRING_UTIL_H_
+
+#include <sstream>
+#include <string>
+#include <utility>
+#include "internal_macros.h"
+
+namespace benchmark {
+
+void AppendHumanReadable(int n, std::string* str);
+
+std::string HumanReadableNumber(double n, double one_k = 1024.0);
+
+#if defined(__MINGW32__)
+__attribute__((format(__MINGW_PRINTF_FORMAT, 1, 2)))
+#elif defined(__GNUC__)
+__attribute__((format(printf, 1, 2)))
+#endif
+std::string
+StrFormat(const char* format, ...);
+
+inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
+  return out;
+}
+
+template <class First, class... Rest>
+inline std::ostream& StrCatImp(std::ostream& out, First&& f, Rest&&... rest) {
+  out << std::forward<First>(f);
+  return StrCatImp(out, std::forward<Rest>(rest)...);
+}
+
+template <class... Args>
+inline std::string StrCat(Args&&...
args) { + std::ostringstream ss; + StrCatImp(ss, std::forward(args)...); + return ss.str(); +} + +#ifdef BENCHMARK_STL_ANDROID_GNUSTL +/* + * GNU STL in Android NDK lacks support for some C++11 functions, including + * stoul, stoi, stod. We reimplement them here using C functions strtoul, + * strtol, strtod. Note that reimplemented functions are in benchmark:: + * namespace, not std:: namespace. + */ +unsigned long stoul(const std::string& str, size_t* pos = nullptr, + int base = 10); +int stoi(const std::string& str, size_t* pos = nullptr, int base = 10); +double stod(const std::string& str, size_t* pos = nullptr); +#else +using std::stoul; +using std::stoi; +using std::stod; +#endif + +} // end namespace benchmark + +#endif // BENCHMARK_STRING_UTIL_H_ diff --git a/benchmarks/thirdparty/benchmark/src/sysinfo.cc b/benchmarks/thirdparty/benchmark/src/sysinfo.cc new file mode 100755 index 0000000000..8bab9320f1 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/sysinfo.cc @@ -0,0 +1,712 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "internal_macros.h" + +#ifdef BENCHMARK_OS_WINDOWS +#include +#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA +#include +#include +#include +#else +#include +#ifndef BENCHMARK_OS_FUCHSIA +#include +#endif +#include +#include // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD +#include +#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \ + defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD +#define BENCHMARK_HAS_SYSCTL +#include +#endif +#endif +#if defined(BENCHMARK_OS_SOLARIS) +#include +#endif +#if defined(BENCHMARK_OS_QNX) +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "check.h" +#include "cycleclock.h" +#include "internal_macros.h" +#include "log.h" +#include "sleep.h" +#include "string_util.h" + +namespace benchmark { +namespace { + +void PrintImp(std::ostream& out) { out << std::endl; } + +template +void PrintImp(std::ostream& out, First&& f, Rest&&... rest) { + out << std::forward(f); + PrintImp(out, std::forward(rest)...); +} + +template +BENCHMARK_NORETURN void PrintErrorAndDie(Args&&... args) { + PrintImp(std::cerr, std::forward(args)...); + std::exit(EXIT_FAILURE); +} + +#ifdef BENCHMARK_HAS_SYSCTL + +/// ValueUnion - A type used to correctly alias the byte-for-byte output of +/// `sysctl` with the result type it's to be interpreted as. +struct ValueUnion { + union DataT { + uint32_t uint32_value; + uint64_t uint64_value; + // For correct aliasing of union members from bytes. + char bytes[8]; + }; + using DataPtr = std::unique_ptr; + + // The size of the data union member + its trailing array size. 
+ size_t Size; + DataPtr Buff; + + public: + ValueUnion() : Size(0), Buff(nullptr, &std::free) {} + + explicit ValueUnion(size_t BuffSize) + : Size(sizeof(DataT) + BuffSize), + Buff(::new (std::malloc(Size)) DataT(), &std::free) {} + + ValueUnion(ValueUnion&& other) = default; + + explicit operator bool() const { return bool(Buff); } + + char* data() const { return Buff->bytes; } + + std::string GetAsString() const { return std::string(data()); } + + int64_t GetAsInteger() const { + if (Size == sizeof(Buff->uint32_value)) + return static_cast(Buff->uint32_value); + else if (Size == sizeof(Buff->uint64_value)) + return static_cast(Buff->uint64_value); + BENCHMARK_UNREACHABLE(); + } + + uint64_t GetAsUnsigned() const { + if (Size == sizeof(Buff->uint32_value)) + return Buff->uint32_value; + else if (Size == sizeof(Buff->uint64_value)) + return Buff->uint64_value; + BENCHMARK_UNREACHABLE(); + } + + template + std::array GetAsArray() { + const int ArrSize = sizeof(T) * N; + CHECK_LE(ArrSize, Size); + std::array Arr; + std::memcpy(Arr.data(), data(), ArrSize); + return Arr; + } +}; + +ValueUnion GetSysctlImp(std::string const& Name) { +#if defined BENCHMARK_OS_OPENBSD + int mib[2]; + + mib[0] = CTL_HW; + if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed")){ + ValueUnion buff(sizeof(int)); + + if (Name == "hw.ncpu") { + mib[1] = HW_NCPU; + } else { + mib[1] = HW_CPUSPEED; + } + + if (sysctl(mib, 2, buff.data(), &buff.Size, nullptr, 0) == -1) { + return ValueUnion(); + } + return buff; + } + return ValueUnion(); +#else + size_t CurBuffSize = 0; + if (sysctlbyname(Name.c_str(), nullptr, &CurBuffSize, nullptr, 0) == -1) + return ValueUnion(); + + ValueUnion buff(CurBuffSize); + if (sysctlbyname(Name.c_str(), buff.data(), &buff.Size, nullptr, 0) == 0) + return buff; + return ValueUnion(); +#endif +} + +BENCHMARK_MAYBE_UNUSED +bool GetSysctl(std::string const& Name, std::string* Out) { + Out->clear(); + auto Buff = GetSysctlImp(Name); + if (!Buff) return false; + Out->assign(Buff.data()); + return true; +} + +template ::value>::type> +bool GetSysctl(std::string const& Name, Tp* Out) { + *Out = 0; + auto Buff = GetSysctlImp(Name); + if (!Buff) return false; + *Out = static_cast(Buff.GetAsUnsigned()); + return true; +} + +template +bool GetSysctl(std::string const& Name, std::array* Out) { + auto Buff = GetSysctlImp(Name); + if (!Buff) return false; + *Out = Buff.GetAsArray(); + return true; +} +#endif + +template +bool ReadFromFile(std::string const& fname, ArgT* arg) { + *arg = ArgT(); + std::ifstream f(fname.c_str()); + if (!f.is_open()) return false; + f >> *arg; + return f.good(); +} + +CPUInfo::Scaling CpuScaling(int num_cpus) { + // We don't have a valid CPU count, so don't even bother. + if (num_cpus <= 0) return CPUInfo::Scaling::UNKNOWN; +#ifdef BENCHMARK_OS_QNX + return CPUInfo::Scaling::UNKNOWN; +#endif +#ifndef BENCHMARK_OS_WINDOWS + // On Linux, the CPUfreq subsystem exposes CPU information as files on the + // local file system. If reading the exported files fails, then we may not be + // running on Linux, so we silently ignore all the read errors. 
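+  // (The "performance" governor pins the clock, so any other governor value,
+  //  e.g. "powersave" or "ondemand", is reported as scaling being enabled.)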
+ std::string res; + for (int cpu = 0; cpu < num_cpus; ++cpu) { + std::string governor_file = + StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor"); + if (ReadFromFile(governor_file, &res) && res != "performance") return CPUInfo::Scaling::ENABLED; + } + return CPUInfo::Scaling::DISABLED; +#endif + return CPUInfo::Scaling::UNKNOWN; +} + +int CountSetBitsInCPUMap(std::string Val) { + auto CountBits = [](std::string Part) { + using CPUMask = std::bitset; + Part = "0x" + Part; + CPUMask Mask(benchmark::stoul(Part, nullptr, 16)); + return static_cast(Mask.count()); + }; + size_t Pos; + int total = 0; + while ((Pos = Val.find(',')) != std::string::npos) { + total += CountBits(Val.substr(0, Pos)); + Val = Val.substr(Pos + 1); + } + if (!Val.empty()) { + total += CountBits(Val); + } + return total; +} + +BENCHMARK_MAYBE_UNUSED +std::vector GetCacheSizesFromKVFS() { + std::vector res; + std::string dir = "/sys/devices/system/cpu/cpu0/cache/"; + int Idx = 0; + while (true) { + CPUInfo::CacheInfo info; + std::string FPath = StrCat(dir, "index", Idx++, "/"); + std::ifstream f(StrCat(FPath, "size").c_str()); + if (!f.is_open()) break; + std::string suffix; + f >> info.size; + if (f.fail()) + PrintErrorAndDie("Failed while reading file '", FPath, "size'"); + if (f.good()) { + f >> suffix; + if (f.bad()) + PrintErrorAndDie( + "Invalid cache size format: failed to read size suffix"); + else if (f && suffix != "K") + PrintErrorAndDie("Invalid cache size format: Expected bytes ", suffix); + else if (suffix == "K") + info.size *= 1024; + } + if (!ReadFromFile(StrCat(FPath, "type"), &info.type)) + PrintErrorAndDie("Failed to read from file ", FPath, "type"); + if (!ReadFromFile(StrCat(FPath, "level"), &info.level)) + PrintErrorAndDie("Failed to read from file ", FPath, "level"); + std::string map_str; + if (!ReadFromFile(StrCat(FPath, "shared_cpu_map"), &map_str)) + PrintErrorAndDie("Failed to read from file ", FPath, "shared_cpu_map"); + info.num_sharing = CountSetBitsInCPUMap(map_str); + res.push_back(info); + } + + return res; +} + +#ifdef BENCHMARK_OS_MACOSX +std::vector GetCacheSizesMacOSX() { + std::vector res; + std::array CacheCounts{{0, 0, 0, 0}}; + GetSysctl("hw.cacheconfig", &CacheCounts); + + struct { + std::string name; + std::string type; + int level; + uint64_t num_sharing; + } Cases[] = {{"hw.l1dcachesize", "Data", 1, CacheCounts[1]}, + {"hw.l1icachesize", "Instruction", 1, CacheCounts[1]}, + {"hw.l2cachesize", "Unified", 2, CacheCounts[2]}, + {"hw.l3cachesize", "Unified", 3, CacheCounts[3]}}; + for (auto& C : Cases) { + int val; + if (!GetSysctl(C.name, &val)) continue; + CPUInfo::CacheInfo info; + info.type = C.type; + info.level = C.level; + info.size = val; + info.num_sharing = static_cast(C.num_sharing); + res.push_back(std::move(info)); + } + return res; +} +#elif defined(BENCHMARK_OS_WINDOWS) +std::vector GetCacheSizesWindows() { + std::vector res; + DWORD buffer_size = 0; + using PInfo = SYSTEM_LOGICAL_PROCESSOR_INFORMATION; + using CInfo = CACHE_DESCRIPTOR; + + using UPtr = std::unique_ptr; + GetLogicalProcessorInformation(nullptr, &buffer_size); + UPtr buff((PInfo*)malloc(buffer_size), &std::free); + if (!GetLogicalProcessorInformation(buff.get(), &buffer_size)) + PrintErrorAndDie("Failed during call to GetLogicalProcessorInformation: ", + GetLastError()); + + PInfo* it = buff.get(); + PInfo* end = buff.get() + (buffer_size / sizeof(PInfo)); + + for (; it != end; ++it) { + if (it->Relationship != RelationCache) continue; + using BitSet = std::bitset; + BitSet 
B(it->ProcessorMask); + // To prevent duplicates, only consider caches where CPU 0 is specified + if (!B.test(0)) continue; + CInfo* Cache = &it->Cache; + CPUInfo::CacheInfo C; + C.num_sharing = static_cast(B.count()); + C.level = Cache->Level; + C.size = Cache->Size; + switch (Cache->Type) { + case CacheUnified: + C.type = "Unified"; + break; + case CacheInstruction: + C.type = "Instruction"; + break; + case CacheData: + C.type = "Data"; + break; + case CacheTrace: + C.type = "Trace"; + break; + default: + C.type = "Unknown"; + break; + } + res.push_back(C); + } + return res; +} +#elif BENCHMARK_OS_QNX +std::vector GetCacheSizesQNX() { + std::vector res; + struct cacheattr_entry *cache = SYSPAGE_ENTRY(cacheattr); + uint32_t const elsize = SYSPAGE_ELEMENT_SIZE(cacheattr); + int num = SYSPAGE_ENTRY_SIZE(cacheattr) / elsize ; + for(int i = 0; i < num; ++i ) { + CPUInfo::CacheInfo info; + switch (cache->flags){ + case CACHE_FLAG_INSTR : + info.type = "Instruction"; + info.level = 1; + break; + case CACHE_FLAG_DATA : + info.type = "Data"; + info.level = 1; + break; + case CACHE_FLAG_UNIFIED : + info.type = "Unified"; + info.level = 2; + break; + case CACHE_FLAG_SHARED : + info.type = "Shared"; + info.level = 3; + break; + default : + continue; + break; + } + info.size = cache->line_size * cache->num_lines; + info.num_sharing = 0; + res.push_back(std::move(info)); + cache = SYSPAGE_ARRAY_ADJ_OFFSET(cacheattr, cache, elsize); + } + return res; +} +#endif + +std::vector GetCacheSizes() { +#ifdef BENCHMARK_OS_MACOSX + return GetCacheSizesMacOSX(); +#elif defined(BENCHMARK_OS_WINDOWS) + return GetCacheSizesWindows(); +#elif defined(BENCHMARK_OS_QNX) + return GetCacheSizesQNX(); +#else + return GetCacheSizesFromKVFS(); +#endif +} + +std::string GetSystemName() { +#if defined(BENCHMARK_OS_WINDOWS) + std::string str; + const unsigned COUNT = MAX_COMPUTERNAME_LENGTH+1; + TCHAR hostname[COUNT] = {'\0'}; + DWORD DWCOUNT = COUNT; + if (!GetComputerName(hostname, &DWCOUNT)) + return std::string(""); +#ifndef UNICODE + str = std::string(hostname, DWCOUNT); +#else + //Using wstring_convert, Is deprecated in C++17 + using convert_type = std::codecvt_utf8; + std::wstring_convert converter; + std::wstring wStr(hostname, DWCOUNT); + str = converter.to_bytes(wStr); +#endif + return str; +#else // defined(BENCHMARK_OS_WINDOWS) +#ifndef HOST_NAME_MAX +#ifdef BENCHMARK_HAS_SYSCTL // BSD/Mac Doesnt have HOST_NAME_MAX defined +#define HOST_NAME_MAX 64 +#elif defined(BENCHMARK_OS_NACL) +#define HOST_NAME_MAX 64 +#elif defined(BENCHMARK_OS_QNX) +#define HOST_NAME_MAX 154 +#elif defined(BENCHMARK_OS_RTEMS) +#define HOST_NAME_MAX 256 +#else +#warning "HOST_NAME_MAX not defined. using 64" +#define HOST_NAME_MAX 64 +#endif +#endif // def HOST_NAME_MAX + char hostname[HOST_NAME_MAX]; + int retVal = gethostname(hostname, HOST_NAME_MAX); + if (retVal != 0) return std::string(""); + return std::string(hostname); +#endif // Catch-all POSIX block. +} + +int GetNumCPUs() { +#ifdef BENCHMARK_HAS_SYSCTL + int NumCPU = -1; + if (GetSysctl("hw.ncpu", &NumCPU)) return NumCPU; + fprintf(stderr, "Err: %s\n", strerror(errno)); + std::exit(EXIT_FAILURE); +#elif defined(BENCHMARK_OS_WINDOWS) + SYSTEM_INFO sysinfo; + // Use memset as opposed to = {} to avoid GCC missing initializer false + // positives. 
+ std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO)); + GetSystemInfo(&sysinfo); + return sysinfo.dwNumberOfProcessors; // number of logical + // processors in the current + // group +#elif defined(BENCHMARK_OS_SOLARIS) + // Returns -1 in case of a failure. + int NumCPU = sysconf(_SC_NPROCESSORS_ONLN); + if (NumCPU < 0) { + fprintf(stderr, + "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n", + strerror(errno)); + } + return NumCPU; +#elif defined(BENCHMARK_OS_QNX) + return static_cast(_syspage_ptr->num_cpu); +#else + int NumCPUs = 0; + int MaxID = -1; + std::ifstream f("/proc/cpuinfo"); + if (!f.is_open()) { + std::cerr << "failed to open /proc/cpuinfo\n"; + return -1; + } + const std::string Key = "processor"; + std::string ln; + while (std::getline(f, ln)) { + if (ln.empty()) continue; + size_t SplitIdx = ln.find(':'); + std::string value; +#if defined(__s390__) + // s390 has another format in /proc/cpuinfo + // it needs to be parsed differently + if (SplitIdx != std::string::npos) value = ln.substr(Key.size()+1,SplitIdx-Key.size()-1); +#else + if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1); +#endif + if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) { + NumCPUs++; + if (!value.empty()) { + int CurID = benchmark::stoi(value); + MaxID = std::max(CurID, MaxID); + } + } + } + if (f.bad()) { + std::cerr << "Failure reading /proc/cpuinfo\n"; + return -1; + } + if (!f.eof()) { + std::cerr << "Failed to read to end of /proc/cpuinfo\n"; + return -1; + } + f.close(); + + if ((MaxID + 1) != NumCPUs) { + fprintf(stderr, + "CPU ID assignments in /proc/cpuinfo seem messed up." + " This is usually caused by a bad BIOS.\n"); + } + return NumCPUs; +#endif + BENCHMARK_UNREACHABLE(); +} + +double GetCPUCyclesPerSecond() { +#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN + long freq; + + // If the kernel is exporting the tsc frequency use that. There are issues + // where cpuinfo_max_freq cannot be relied on because the BIOS may be + // exporintg an invalid p-state (on x86) or p-states may be used to put the + // processor in a new mode (turbo mode). Essentially, those frequencies + // cannot always be relied upon. The same reasons apply to /proc/cpuinfo as + // well. + if (ReadFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq) + // If CPU scaling is in effect, we want to use the *maximum* frequency, + // not whatever CPU speed some random processor happens to be using now. + || ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", + &freq)) { + // The value is in kHz (as the file name suggests). For example, on a + // 2GHz warpstation, the file contains the value "2000000". + return freq * 1000.0; + } + + const double error_value = -1; + double bogo_clock = error_value; + + std::ifstream f("/proc/cpuinfo"); + if (!f.is_open()) { + std::cerr << "failed to open /proc/cpuinfo\n"; + return error_value; + } + + auto startsWithKey = [](std::string const& Value, std::string const& Key) { + if (Key.size() > Value.size()) return false; + auto Cmp = [&](char X, char Y) { + return std::tolower(X) == std::tolower(Y); + }; + return std::equal(Key.begin(), Key.end(), Value.begin(), Cmp); + }; + + std::string ln; + while (std::getline(f, ln)) { + if (ln.empty()) continue; + size_t SplitIdx = ln.find(':'); + std::string value; + if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1); + // When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only + // accept positive values. 
Some environments (virtual machines) report zero, + // which would cause infinite looping in WallTime_Init. + if (startsWithKey(ln, "cpu MHz")) { + if (!value.empty()) { + double cycles_per_second = benchmark::stod(value) * 1000000.0; + if (cycles_per_second > 0) return cycles_per_second; + } + } else if (startsWithKey(ln, "bogomips")) { + if (!value.empty()) { + bogo_clock = benchmark::stod(value) * 1000000.0; + if (bogo_clock < 0.0) bogo_clock = error_value; + } + } + } + if (f.bad()) { + std::cerr << "Failure reading /proc/cpuinfo\n"; + return error_value; + } + if (!f.eof()) { + std::cerr << "Failed to read to end of /proc/cpuinfo\n"; + return error_value; + } + f.close(); + // If we found the bogomips clock, but nothing better, we'll use it (but + // we're not happy about it); otherwise, fallback to the rough estimation + // below. + if (bogo_clock >= 0.0) return bogo_clock; + +#elif defined BENCHMARK_HAS_SYSCTL + constexpr auto* FreqStr = +#if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD) + "machdep.tsc_freq"; +#elif defined BENCHMARK_OS_OPENBSD + "hw.cpuspeed"; +#else + "hw.cpufrequency"; +#endif + unsigned long long hz = 0; +#if defined BENCHMARK_OS_OPENBSD + if (GetSysctl(FreqStr, &hz)) return hz * 1000000; +#else + if (GetSysctl(FreqStr, &hz)) return hz; +#endif + fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n", + FreqStr, strerror(errno)); + +#elif defined BENCHMARK_OS_WINDOWS + // In NT, read MHz from the registry. If we fail to do so or we're in win9x + // then make a crude estimate. + DWORD data, data_size = sizeof(data); + if (IsWindowsXPOrGreater() && + SUCCEEDED( + SHGetValueA(HKEY_LOCAL_MACHINE, + "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", + "~MHz", nullptr, &data, &data_size))) + return static_cast((int64_t)data * + (int64_t)(1000 * 1000)); // was mhz +#elif defined (BENCHMARK_OS_SOLARIS) + kstat_ctl_t *kc = kstat_open(); + if (!kc) { + std::cerr << "failed to open /dev/kstat\n"; + return -1; + } + kstat_t *ksp = kstat_lookup(kc, (char*)"cpu_info", -1, (char*)"cpu_info0"); + if (!ksp) { + std::cerr << "failed to lookup in /dev/kstat\n"; + return -1; + } + if (kstat_read(kc, ksp, NULL) < 0) { + std::cerr << "failed to read from /dev/kstat\n"; + return -1; + } + kstat_named_t *knp = + (kstat_named_t*)kstat_data_lookup(ksp, (char*)"current_clock_Hz"); + if (!knp) { + std::cerr << "failed to lookup data in /dev/kstat\n"; + return -1; + } + if (knp->data_type != KSTAT_DATA_UINT64) { + std::cerr << "current_clock_Hz is of unexpected data type: " + << knp->data_type << "\n"; + return -1; + } + double clock_hz = knp->value.ui64; + kstat_close(kc); + return clock_hz; +#elif defined (BENCHMARK_OS_QNX) + return static_cast((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) * + (int64_t)(1000 * 1000)); +#endif + // If we've fallen through, attempt to roughly estimate the CPU clock rate. 
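+  // (Sleeping for a fixed 1000 ms and counting the elapsed cycle ticks makes
+  //  the measured delta itself the cycles-per-second estimate.)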
+ const int estimate_time_ms = 1000; + const auto start_ticks = cycleclock::Now(); + SleepForMilliseconds(estimate_time_ms); + return static_cast(cycleclock::Now() - start_ticks); +} + +std::vector GetLoadAvg() { +#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \ + defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \ + defined BENCHMARK_OS_OPENBSD) && !defined(__ANDROID__) + constexpr int kMaxSamples = 3; + std::vector res(kMaxSamples, 0.0); + const int nelem = getloadavg(res.data(), kMaxSamples); + if (nelem < 1) { + res.clear(); + } else { + res.resize(nelem); + } + return res; +#else + return {}; +#endif +} + +} // end namespace + +const CPUInfo& CPUInfo::Get() { + static const CPUInfo* info = new CPUInfo(); + return *info; +} + +CPUInfo::CPUInfo() + : num_cpus(GetNumCPUs()), + cycles_per_second(GetCPUCyclesPerSecond()), + caches(GetCacheSizes()), + scaling(CpuScaling(num_cpus)), + load_avg(GetLoadAvg()) {} + + +const SystemInfo& SystemInfo::Get() { + static const SystemInfo* info = new SystemInfo(); + return *info; +} + +SystemInfo::SystemInfo() : name(GetSystemName()) {} +} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/thread_manager.h b/benchmarks/thirdparty/benchmark/src/thread_manager.h new file mode 100755 index 0000000000..28e2dd53af --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/thread_manager.h @@ -0,0 +1,64 @@ +#ifndef BENCHMARK_THREAD_MANAGER_H +#define BENCHMARK_THREAD_MANAGER_H + +#include + +#include "benchmark/benchmark.h" +#include "mutex.h" + +namespace benchmark { +namespace internal { + +class ThreadManager { + public: + explicit ThreadManager(int num_threads) + : alive_threads_(num_threads), start_stop_barrier_(num_threads) {} + + Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) { + return benchmark_mutex_; + } + + bool StartStopBarrier() EXCLUDES(end_cond_mutex_) { + return start_stop_barrier_.wait(); + } + + void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) { + start_stop_barrier_.removeThread(); + if (--alive_threads_ == 0) { + MutexLock lock(end_cond_mutex_); + end_condition_.notify_all(); + } + } + + void WaitForAllThreads() EXCLUDES(end_cond_mutex_) { + MutexLock lock(end_cond_mutex_); + end_condition_.wait(lock.native_handle(), + [this]() { return alive_threads_ == 0; }); + } + + public: + struct Result { + IterationCount iterations = 0; + double real_time_used = 0; + double cpu_time_used = 0; + double manual_time_used = 0; + int64_t complexity_n = 0; + std::string report_label_; + std::string error_message_; + bool has_error_ = false; + UserCounters counters; + }; + GUARDED_BY(GetBenchmarkMutex()) Result results; + + private: + mutable Mutex benchmark_mutex_; + std::atomic alive_threads_; + Barrier start_stop_barrier_; + Mutex end_cond_mutex_; + Condition end_condition_; +}; + +} // namespace internal +} // namespace benchmark + +#endif // BENCHMARK_THREAD_MANAGER_H diff --git a/benchmarks/thirdparty/benchmark/src/thread_timer.h b/benchmarks/thirdparty/benchmark/src/thread_timer.h new file mode 100755 index 0000000000..1703ca0d6f --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/thread_timer.h @@ -0,0 +1,86 @@ +#ifndef BENCHMARK_THREAD_TIMER_H +#define BENCHMARK_THREAD_TIMER_H + +#include "check.h" +#include "timers.h" + +namespace benchmark { +namespace internal { + +class ThreadTimer { + explicit ThreadTimer(bool measure_process_cpu_time_) + : measure_process_cpu_time(measure_process_cpu_time_) {} + + public: + static ThreadTimer Create() { + return 
ThreadTimer(/*measure_process_cpu_time_=*/false); + } + static ThreadTimer CreateProcessCpuTime() { + return ThreadTimer(/*measure_process_cpu_time_=*/true); + } + + // Called by each thread + void StartTimer() { + running_ = true; + start_real_time_ = ChronoClockNow(); + start_cpu_time_ = ReadCpuTimerOfChoice(); + } + + // Called by each thread + void StopTimer() { + CHECK(running_); + running_ = false; + real_time_used_ += ChronoClockNow() - start_real_time_; + // Floating point error can result in the subtraction producing a negative + // time. Guard against that. + cpu_time_used_ += + std::max(ReadCpuTimerOfChoice() - start_cpu_time_, 0); + } + + // Called by each thread + void SetIterationTime(double seconds) { manual_time_used_ += seconds; } + + bool running() const { return running_; } + + // REQUIRES: timer is not running + double real_time_used() const { + CHECK(!running_); + return real_time_used_; + } + + // REQUIRES: timer is not running + double cpu_time_used() const { + CHECK(!running_); + return cpu_time_used_; + } + + // REQUIRES: timer is not running + double manual_time_used() const { + CHECK(!running_); + return manual_time_used_; + } + + private: + double ReadCpuTimerOfChoice() const { + if (measure_process_cpu_time) return ProcessCPUUsage(); + return ThreadCPUUsage(); + } + + // should the thread, or the process, time be measured? + const bool measure_process_cpu_time; + + bool running_ = false; // Is the timer running + double start_real_time_ = 0; // If running_ + double start_cpu_time_ = 0; // If running_ + + // Accumulated time so far (does not contain current slice if running_) + double real_time_used_ = 0; + double cpu_time_used_ = 0; + // Manually set iteration time. User sets this with SetIterationTime(seconds). + double manual_time_used_ = 0; +}; + +} // namespace internal +} // namespace benchmark + +#endif // BENCHMARK_THREAD_TIMER_H diff --git a/benchmarks/thirdparty/benchmark/src/timers.cc b/benchmarks/thirdparty/benchmark/src/timers.cc new file mode 100755 index 0000000000..4f76eddc1d --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/timers.cc @@ -0,0 +1,244 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "timers.h" +#include "internal_macros.h" + +#ifdef BENCHMARK_OS_WINDOWS +#include +#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA +#include +#include +#else +#include +#ifndef BENCHMARK_OS_FUCHSIA +#include +#endif +#include +#include // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD +#include +#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX +#include +#endif +#if defined(BENCHMARK_OS_MACOSX) +#include +#include +#include +#endif +#endif + +#ifdef BENCHMARK_OS_EMSCRIPTEN +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "check.h" +#include "log.h" +#include "sleep.h" +#include "string_util.h" + +namespace benchmark { + +// Suppress unused warnings on helper functions. +#if defined(__GNUC__) +#pragma GCC diagnostic ignored "-Wunused-function" +#endif + +namespace { +#if defined(BENCHMARK_OS_WINDOWS) +double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) { + ULARGE_INTEGER kernel; + ULARGE_INTEGER user; + kernel.HighPart = kernel_time.dwHighDateTime; + kernel.LowPart = kernel_time.dwLowDateTime; + user.HighPart = user_time.dwHighDateTime; + user.LowPart = user_time.dwLowDateTime; + return (static_cast(kernel.QuadPart) + + static_cast(user.QuadPart)) * + 1e-7; +} +#elif !defined(BENCHMARK_OS_FUCHSIA) +double MakeTime(struct rusage const& ru) { + return (static_cast(ru.ru_utime.tv_sec) + + static_cast(ru.ru_utime.tv_usec) * 1e-6 + + static_cast(ru.ru_stime.tv_sec) + + static_cast(ru.ru_stime.tv_usec) * 1e-6); +} +#endif +#if defined(BENCHMARK_OS_MACOSX) +double MakeTime(thread_basic_info_data_t const& info) { + return (static_cast(info.user_time.seconds) + + static_cast(info.user_time.microseconds) * 1e-6 + + static_cast(info.system_time.seconds) + + static_cast(info.system_time.microseconds) * 1e-6); +} +#endif +#if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID) +double MakeTime(struct timespec const& ts) { + return ts.tv_sec + (static_cast(ts.tv_nsec) * 1e-9); +} +#endif + +BENCHMARK_NORETURN static void DiagnoseAndExit(const char* msg) { + std::cerr << "ERROR: " << msg << std::endl; + std::exit(EXIT_FAILURE); +} + +} // end namespace + +double ProcessCPUUsage() { +#if defined(BENCHMARK_OS_WINDOWS) + HANDLE proc = GetCurrentProcess(); + FILETIME creation_time; + FILETIME exit_time; + FILETIME kernel_time; + FILETIME user_time; + if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time, + &user_time)) + return MakeTime(kernel_time, user_time); + DiagnoseAndExit("GetProccessTimes() failed"); +#elif defined(BENCHMARK_OS_EMSCRIPTEN) + // clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten. + // Use Emscripten-specific API. Reported CPU time would be exactly the + // same as total time, but this is ok because there aren't long-latency + // syncronous system calls in Emscripten. + return emscripten_get_now() * 1e-3; +#elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX) + // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. See + // https://github.com/google/benchmark/pull/292 + struct timespec spec; + if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0) + return MakeTime(spec); + DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed"); +#else + struct rusage ru; + if (getrusage(RUSAGE_SELF, &ru) == 0) return MakeTime(ru); + DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) 
failed"); +#endif +} + +double ThreadCPUUsage() { +#if defined(BENCHMARK_OS_WINDOWS) + HANDLE this_thread = GetCurrentThread(); + FILETIME creation_time; + FILETIME exit_time; + FILETIME kernel_time; + FILETIME user_time; + GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time, + &user_time); + return MakeTime(kernel_time, user_time); +#elif defined(BENCHMARK_OS_MACOSX) + // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. See + // https://github.com/google/benchmark/pull/292 + mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT; + thread_basic_info_data_t info; + mach_port_t thread = pthread_mach_thread_np(pthread_self()); + if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) == + KERN_SUCCESS) { + return MakeTime(info); + } + DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info"); +#elif defined(BENCHMARK_OS_EMSCRIPTEN) + // Emscripten doesn't support traditional threads + return ProcessCPUUsage(); +#elif defined(BENCHMARK_OS_RTEMS) + // RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See + // https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c + return ProcessCPUUsage(); +#elif defined(BENCHMARK_OS_SOLARIS) + struct rusage ru; + if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru); + DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed"); +#elif defined(CLOCK_THREAD_CPUTIME_ID) + struct timespec ts; + if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts); + DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed"); +#else +#error Per-thread timing is not available on your system. +#endif +} + +std::string LocalDateTimeString() { + // Write the local time in RFC3339 format yyyy-mm-ddTHH:MM:SS+/-HH:MM. + typedef std::chrono::system_clock Clock; + std::time_t now = Clock::to_time_t(Clock::now()); + const std::size_t kTzOffsetLen = 6; + const std::size_t kTimestampLen = 19; + + std::size_t tz_len; + std::size_t timestamp_len; + long int offset_minutes; + char tz_offset_sign = '+'; + // Long enough buffers to avoid format-overflow warnings + char tz_offset[128]; + char storage[128]; + +#if defined(BENCHMARK_OS_WINDOWS) + std::tm *timeinfo_p = ::localtime(&now); +#else + std::tm timeinfo; + std::tm *timeinfo_p = &timeinfo; + ::localtime_r(&now, &timeinfo); +#endif + + tz_len = std::strftime(tz_offset, sizeof(tz_offset), "%z", timeinfo_p); + + if (tz_len < kTzOffsetLen && tz_len > 1) { + // Timezone offset was written. strftime writes offset as +HHMM or -HHMM, + // RFC3339 specifies an offset as +HH:MM or -HH:MM. To convert, we parse + // the offset as an integer, then reprint it to a string. + + offset_minutes = ::strtol(tz_offset, NULL, 10); + if (offset_minutes < 0) { + offset_minutes *= -1; + tz_offset_sign = '-'; + } + + tz_len = ::snprintf(tz_offset, sizeof(tz_offset), "%c%02li:%02li", + tz_offset_sign, offset_minutes / 100, offset_minutes % 100); + CHECK(tz_len == kTzOffsetLen); + ((void)tz_len); // Prevent unused variable warning in optimized build. + } else { + // Unknown offset. RFC3339 specifies that unknown local offsets should be + // written as UTC time with -00:00 timezone. +#if defined(BENCHMARK_OS_WINDOWS) + // Potential race condition if another thread calls localtime or gmtime. 
+ timeinfo_p = ::gmtime(&now); +#else + ::gmtime_r(&now, &timeinfo); +#endif + + strncpy(tz_offset, "-00:00", kTzOffsetLen + 1); + } + + timestamp_len = std::strftime(storage, sizeof(storage), "%Y-%m-%dT%H:%M:%S", + timeinfo_p); + CHECK(timestamp_len == kTimestampLen); + // Prevent unused variable warning in optimized build. + ((void)kTimestampLen); + + std::strncat(storage, tz_offset, sizeof(storage) - timestamp_len - 1); + return std::string(storage); +} + +} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/timers.h b/benchmarks/thirdparty/benchmark/src/timers.h new file mode 100755 index 0000000000..65606ccd93 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/src/timers.h @@ -0,0 +1,48 @@ +#ifndef BENCHMARK_TIMERS_H +#define BENCHMARK_TIMERS_H + +#include +#include + +namespace benchmark { + +// Return the CPU usage of the current process +double ProcessCPUUsage(); + +// Return the CPU usage of the children of the current process +double ChildrenCPUUsage(); + +// Return the CPU usage of the current thread +double ThreadCPUUsage(); + +#if defined(HAVE_STEADY_CLOCK) +template +struct ChooseSteadyClock { + typedef std::chrono::high_resolution_clock type; +}; + +template <> +struct ChooseSteadyClock { + typedef std::chrono::steady_clock type; +}; +#endif + +struct ChooseClockType { +#if defined(HAVE_STEADY_CLOCK) + typedef ChooseSteadyClock<>::type type; +#else + typedef std::chrono::high_resolution_clock type; +#endif +}; + +inline double ChronoClockNow() { + typedef ChooseClockType::type ClockType; + using FpSeconds = std::chrono::duration; + return FpSeconds(ClockType::now().time_since_epoch()).count(); +} + +std::string LocalDateTimeString(); + +} // end namespace benchmark + +#endif // BENCHMARK_TIMERS_H diff --git a/benchmarks/thirdparty/benchmark/test/AssemblyTests.cmake b/benchmarks/thirdparty/benchmark/test/AssemblyTests.cmake new file mode 100755 index 0000000000..3d078586f1 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/test/AssemblyTests.cmake @@ -0,0 +1,46 @@ + +include(split_list) + +set(ASM_TEST_FLAGS "") +check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG) +if (BENCHMARK_HAS_O3_FLAG) + list(APPEND ASM_TEST_FLAGS -O3) +endif() + +check_cxx_compiler_flag(-g0 BENCHMARK_HAS_G0_FLAG) +if (BENCHMARK_HAS_G0_FLAG) + list(APPEND ASM_TEST_FLAGS -g0) +endif() + +check_cxx_compiler_flag(-fno-stack-protector BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG) +if (BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG) + list(APPEND ASM_TEST_FLAGS -fno-stack-protector) +endif() + +split_list(ASM_TEST_FLAGS) +string(TOUPPER "${CMAKE_CXX_COMPILER_ID}" ASM_TEST_COMPILER) + +macro(add_filecheck_test name) + cmake_parse_arguments(ARG "" "" "CHECK_PREFIXES" ${ARGV}) + add_library(${name} OBJECT ${name}.cc) + set_target_properties(${name} PROPERTIES COMPILE_FLAGS "-S ${ASM_TEST_FLAGS}") + set(ASM_OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/${name}.s") + add_custom_target(copy_${name} ALL + COMMAND ${PROJECT_SOURCE_DIR}/tools/strip_asm.py + $ + ${ASM_OUTPUT_FILE} + BYPRODUCTS ${ASM_OUTPUT_FILE}) + add_dependencies(copy_${name} ${name}) + if (NOT ARG_CHECK_PREFIXES) + set(ARG_CHECK_PREFIXES "CHECK") + endif() + foreach(prefix ${ARG_CHECK_PREFIXES}) + add_test(NAME run_${name}_${prefix} + COMMAND + ${LLVM_FILECHECK_EXE} ${name}.cc + --input-file=${ASM_OUTPUT_FILE} + --check-prefixes=CHECK,CHECK-${ASM_TEST_COMPILER} + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) + endforeach() +endmacro() + diff --git a/benchmarks/thirdparty/benchmark/test/CMakeLists.txt 
b/benchmarks/thirdparty/benchmark/test/CMakeLists.txt
new file mode 100755
index 0000000000..c1a3a3fc19
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/CMakeLists.txt
@@ -0,0 +1,263 @@
+# Enable the tests
+
+find_package(Threads REQUIRED)
+include(CheckCXXCompilerFlag)
+
+# NOTE: Some tests use `<cassert>` to perform the test. Therefore we must
+# strip -DNDEBUG from the default CMake flags in DEBUG mode.
+string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE)
+if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" )
+  add_definitions( -UNDEBUG )
+  add_definitions(-DTEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS)
+  # Also remove /D NDEBUG to avoid MSVC warnings about conflicting defines.
+  foreach (flags_var_to_scrub
+      CMAKE_CXX_FLAGS_RELEASE
+      CMAKE_CXX_FLAGS_RELWITHDEBINFO
+      CMAKE_CXX_FLAGS_MINSIZEREL
+      CMAKE_C_FLAGS_RELEASE
+      CMAKE_C_FLAGS_RELWITHDEBINFO
+      CMAKE_C_FLAGS_MINSIZEREL)
+    string (REGEX REPLACE "(^| )[/-]D *NDEBUG($| )" " "
+      "${flags_var_to_scrub}" "${${flags_var_to_scrub}}")
+  endforeach()
+endif()
+
+check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
+set(BENCHMARK_O3_FLAG "")
+if (BENCHMARK_HAS_O3_FLAG)
+  set(BENCHMARK_O3_FLAG "-O3")
+endif()
+
+# NOTE: These flags must be added after find_package(Threads REQUIRED),
+# otherwise they will break the configuration check.
+if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
+  list(APPEND CMAKE_EXE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
+endif()
+
+add_library(output_test_helper STATIC output_test_helper.cc output_test.h)
+
+macro(compile_benchmark_test name)
+  add_executable(${name} "${name}.cc")
+  target_link_libraries(${name} benchmark::benchmark ${CMAKE_THREAD_LIBS_INIT})
+endmacro(compile_benchmark_test)
+
+macro(compile_benchmark_test_with_main name)
+  add_executable(${name} "${name}.cc")
+  target_link_libraries(${name} benchmark::benchmark_main)
+endmacro(compile_benchmark_test_with_main)
+
+macro(compile_output_test name)
+  add_executable(${name} "${name}.cc" output_test.h)
+  target_link_libraries(${name} output_test_helper benchmark::benchmark
+          ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
+endmacro(compile_output_test)
+
+# Demonstration executable
+compile_benchmark_test(benchmark_test)
+add_test(NAME benchmark COMMAND benchmark_test --benchmark_min_time=0.01)
+
+compile_benchmark_test(filter_test)
+macro(add_filter_test name filter expect)
+  add_test(NAME ${name} COMMAND filter_test --benchmark_min_time=0.01 --benchmark_filter=${filter} ${expect})
+  add_test(NAME ${name}_list_only COMMAND filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect})
+endmacro(add_filter_test)
+
+add_filter_test(filter_simple "Foo" 3)
+add_filter_test(filter_simple_negative "-Foo" 2)
+add_filter_test(filter_suffix "BM_.*" 4)
+add_filter_test(filter_suffix_negative "-BM_.*" 1)
+add_filter_test(filter_regex_all ".*" 5)
+add_filter_test(filter_regex_all_negative "-.*" 0)
+add_filter_test(filter_regex_blank "" 5)
+add_filter_test(filter_regex_blank_negative "-" 0)
+add_filter_test(filter_regex_none "monkey" 0)
+add_filter_test(filter_regex_none_negative "-monkey" 5)
+add_filter_test(filter_regex_wildcard ".*Foo.*" 3)
+add_filter_test(filter_regex_wildcard_negative "-.*Foo.*" 2)
+add_filter_test(filter_regex_begin "^BM_.*" 4)
+add_filter_test(filter_regex_begin_negative "-^BM_.*" 1)
+add_filter_test(filter_regex_begin2 "^N" 1)
+add_filter_test(filter_regex_begin2_negative "-^N" 4)
+add_filter_test(filter_regex_end ".*Ba$" 1)
+add_filter_test(filter_regex_end_negative "-.*Ba$" 4)
+
+compile_benchmark_test(options_test)
+add_test(NAME options_benchmarks COMMAND options_test --benchmark_min_time=0.01) + +compile_benchmark_test(basic_test) +add_test(NAME basic_benchmark COMMAND basic_test --benchmark_min_time=0.01) + +compile_benchmark_test(diagnostics_test) +add_test(NAME diagnostics_test COMMAND diagnostics_test --benchmark_min_time=0.01) + +compile_benchmark_test(skip_with_error_test) +add_test(NAME skip_with_error_test COMMAND skip_with_error_test --benchmark_min_time=0.01) + +compile_benchmark_test(donotoptimize_test) +# Some of the issues with DoNotOptimize only occur when optimization is enabled +check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG) +if (BENCHMARK_HAS_O3_FLAG) + set_target_properties(donotoptimize_test PROPERTIES COMPILE_FLAGS "-O3") +endif() +add_test(NAME donotoptimize_test COMMAND donotoptimize_test --benchmark_min_time=0.01) + +compile_benchmark_test(fixture_test) +add_test(NAME fixture_test COMMAND fixture_test --benchmark_min_time=0.01) + +compile_benchmark_test(register_benchmark_test) +add_test(NAME register_benchmark_test COMMAND register_benchmark_test --benchmark_min_time=0.01) + +compile_benchmark_test(map_test) +add_test(NAME map_test COMMAND map_test --benchmark_min_time=0.01) + +compile_benchmark_test(multiple_ranges_test) +add_test(NAME multiple_ranges_test COMMAND multiple_ranges_test --benchmark_min_time=0.01) + +compile_benchmark_test(args_product_test) +add_test(NAME args_product_test COMMAND args_product_test --benchmark_min_time=0.01) + +compile_benchmark_test_with_main(link_main_test) +add_test(NAME link_main_test COMMAND link_main_test --benchmark_min_time=0.01) + +compile_output_test(reporter_output_test) +add_test(NAME reporter_output_test COMMAND reporter_output_test --benchmark_min_time=0.01) + +compile_output_test(templated_fixture_test) +add_test(NAME templated_fixture_test COMMAND templated_fixture_test --benchmark_min_time=0.01) + +compile_output_test(user_counters_test) +add_test(NAME user_counters_test COMMAND user_counters_test --benchmark_min_time=0.01) + +compile_output_test(internal_threading_test) +add_test(NAME internal_threading_test COMMAND internal_threading_test --benchmark_min_time=0.01) + +compile_output_test(report_aggregates_only_test) +add_test(NAME report_aggregates_only_test COMMAND report_aggregates_only_test --benchmark_min_time=0.01) + +compile_output_test(display_aggregates_only_test) +add_test(NAME display_aggregates_only_test COMMAND display_aggregates_only_test --benchmark_min_time=0.01) + +compile_output_test(user_counters_tabular_test) +add_test(NAME user_counters_tabular_test COMMAND user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01) + +compile_output_test(user_counters_thousands_test) +add_test(NAME user_counters_thousands_test COMMAND user_counters_thousands_test --benchmark_min_time=0.01) + +compile_output_test(memory_manager_test) +add_test(NAME memory_manager_test COMMAND memory_manager_test --benchmark_min_time=0.01) + +check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG) +if (BENCHMARK_HAS_CXX03_FLAG) + compile_benchmark_test(cxx03_test) + set_target_properties(cxx03_test + PROPERTIES + CXX_STANDARD 98 + CXX_STANDARD_REQUIRED YES) + # libstdc++ provides different definitions within between dialects. When + # LTO is enabled and -Werror is specified GCC diagnoses this ODR violation + # causing the test to fail to compile. To prevent this we explicitly disable + # the warning. 
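+  # Note that -Wno-odr has to land on the *link* line: with -flto the ODR
+  # check happens at link time, so a compile-time flag alone would not
+  # silence it. (BENCHMARK_ENABLE_LTO is assumed here to be the option that
+  # enables LTO elsewhere in this build.)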
+ check_cxx_compiler_flag(-Wno-odr BENCHMARK_HAS_WNO_ODR) + if (BENCHMARK_ENABLE_LTO AND BENCHMARK_HAS_WNO_ODR) + set_target_properties(cxx03_test + PROPERTIES + LINK_FLAGS "-Wno-odr") + endif() + add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01) +endif() + +# Attempt to work around flaky test failures when running on Appveyor servers. +if (DEFINED ENV{APPVEYOR}) + set(COMPLEXITY_MIN_TIME "0.5") +else() + set(COMPLEXITY_MIN_TIME "0.01") +endif() +compile_output_test(complexity_test) +add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME}) + +############################################################################### +# GoogleTest Unit Tests +############################################################################### + +if (BENCHMARK_ENABLE_GTEST_TESTS) + macro(compile_gtest name) + add_executable(${name} "${name}.cc") + target_link_libraries(${name} benchmark::benchmark + gmock_main ${CMAKE_THREAD_LIBS_INIT}) + endmacro(compile_gtest) + + macro(add_gtest name) + compile_gtest(${name}) + add_test(NAME ${name} COMMAND ${name}) + endmacro() + + add_gtest(benchmark_gtest) + add_gtest(benchmark_name_gtest) + add_gtest(commandlineflags_gtest) + add_gtest(statistics_gtest) + add_gtest(string_util_gtest) +endif(BENCHMARK_ENABLE_GTEST_TESTS) + +############################################################################### +# Assembly Unit Tests +############################################################################### + +if (BENCHMARK_ENABLE_ASSEMBLY_TESTS) + if (NOT LLVM_FILECHECK_EXE) + message(FATAL_ERROR "LLVM FileCheck is required when including this file") + endif() + include(AssemblyTests.cmake) + add_filecheck_test(donotoptimize_assembly_test) + add_filecheck_test(state_assembly_test) + add_filecheck_test(clobber_memory_assembly_test) +endif() + + + +############################################################################### +# Code Coverage Configuration +############################################################################### + +# Add the coverage command(s) +if(CMAKE_BUILD_TYPE) + string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER) +endif() +if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage") + find_program(GCOV gcov) + find_program(LCOV lcov) + find_program(GENHTML genhtml) + find_program(CTEST ctest) + if (GCOV AND LCOV AND GENHTML AND CTEST AND HAVE_CXX_FLAG_COVERAGE) + add_custom_command( + OUTPUT ${CMAKE_BINARY_DIR}/lcov/index.html + COMMAND ${LCOV} -q -z -d . + COMMAND ${LCOV} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . -o before.lcov -i + COMMAND ${CTEST} --force-new-ctest-process + COMMAND ${LCOV} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . 
-o after.lcov + COMMAND ${LCOV} -q -a before.lcov -a after.lcov --output-file final.lcov + COMMAND ${LCOV} -q -r final.lcov "'${CMAKE_SOURCE_DIR}/test/*'" -o final.lcov + COMMAND ${GENHTML} final.lcov -o lcov --demangle-cpp --sort -p "${CMAKE_BINARY_DIR}" -t benchmark + DEPENDS filter_test benchmark_test options_test basic_test fixture_test cxx03_test complexity_test + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + COMMENT "Running LCOV" + ) + add_custom_target(coverage + DEPENDS ${CMAKE_BINARY_DIR}/lcov/index.html + COMMENT "LCOV report at lcov/index.html" + ) + message(STATUS "Coverage command added") + else() + if (HAVE_CXX_FLAG_COVERAGE) + set(CXX_FLAG_COVERAGE_MESSAGE supported) + else() + set(CXX_FLAG_COVERAGE_MESSAGE unavailable) + endif() + message(WARNING + "Coverage not available:\n" + " gcov: ${GCOV}\n" + " lcov: ${LCOV}\n" + " genhtml: ${GENHTML}\n" + " ctest: ${CTEST}\n" + " --coverage flag: ${CXX_FLAG_COVERAGE_MESSAGE}") + endif() +endif() diff --git a/benchmarks/thirdparty/benchmark/test/args_product_test.cc b/benchmarks/thirdparty/benchmark/test/args_product_test.cc new file mode 100755 index 0000000000..8a859f8415 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/test/args_product_test.cc @@ -0,0 +1,77 @@ +#include "benchmark/benchmark.h" + +#include +#include +#include +#include + +class ArgsProductFixture : public ::benchmark::Fixture { + public: + ArgsProductFixture() + : expectedValues({{0, 100, 2000, 30000}, + {1, 15, 3, 8}, + {1, 15, 3, 9}, + {1, 15, 7, 8}, + {1, 15, 7, 9}, + {1, 15, 10, 8}, + {1, 15, 10, 9}, + {2, 15, 3, 8}, + {2, 15, 3, 9}, + {2, 15, 7, 8}, + {2, 15, 7, 9}, + {2, 15, 10, 8}, + {2, 15, 10, 9}, + {4, 5, 6, 11}}) {} + + void SetUp(const ::benchmark::State& state) { + std::vector ranges = {state.range(0), state.range(1), + state.range(2), state.range(3)}; + + assert(expectedValues.find(ranges) != expectedValues.end()); + + actualValues.insert(ranges); + } + + // NOTE: This is not TearDown as we want to check after _all_ runs are + // complete. 
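+  // (benchmark::Fixture::TearDown fires after each individual benchmark run,
+  // whereas the registered fixture object is destroyed only once, after all
+  // runs have finished, so the destructor below is the one hook that sees
+  // the full set of argument combinations accumulated in actualValues.)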
+ virtual ~ArgsProductFixture() { + if (actualValues != expectedValues) { + std::cout << "EXPECTED\n"; + for (auto v : expectedValues) { + std::cout << "{"; + for (int64_t iv : v) { + std::cout << iv << ", "; + } + std::cout << "}\n"; + } + std::cout << "ACTUAL\n"; + for (auto v : actualValues) { + std::cout << "{"; + for (int64_t iv : v) { + std::cout << iv << ", "; + } + std::cout << "}\n"; + } + } + } + + std::set> expectedValues; + std::set> actualValues; +}; + +BENCHMARK_DEFINE_F(ArgsProductFixture, Empty)(benchmark::State& state) { + for (auto _ : state) { + int64_t product = + state.range(0) * state.range(1) * state.range(2) * state.range(3); + for (int64_t x = 0; x < product; x++) { + benchmark::DoNotOptimize(x); + } + } +} + +BENCHMARK_REGISTER_F(ArgsProductFixture, Empty) + ->Args({0, 100, 2000, 30000}) + ->ArgsProduct({{1, 2}, {15}, {3, 7, 10}, {8, 9}}) + ->Args({4, 5, 6, 11}); + +BENCHMARK_MAIN(); diff --git a/benchmarks/thirdparty/benchmark/test/basic_test.cc b/benchmarks/thirdparty/benchmark/test/basic_test.cc new file mode 100755 index 0000000000..5f3dd1a3ee --- /dev/null +++ b/benchmarks/thirdparty/benchmark/test/basic_test.cc @@ -0,0 +1,136 @@ + +#include "benchmark/benchmark.h" + +#define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192) + +void BM_empty(benchmark::State& state) { + for (auto _ : state) { + benchmark::DoNotOptimize(state.iterations()); + } +} +BENCHMARK(BM_empty); +BENCHMARK(BM_empty)->ThreadPerCpu(); + +void BM_spin_empty(benchmark::State& state) { + for (auto _ : state) { + for (int x = 0; x < state.range(0); ++x) { + benchmark::DoNotOptimize(x); + } + } +} +BASIC_BENCHMARK_TEST(BM_spin_empty); +BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu(); + +void BM_spin_pause_before(benchmark::State& state) { + for (int i = 0; i < state.range(0); ++i) { + benchmark::DoNotOptimize(i); + } + for (auto _ : state) { + for (int i = 0; i < state.range(0); ++i) { + benchmark::DoNotOptimize(i); + } + } +} +BASIC_BENCHMARK_TEST(BM_spin_pause_before); +BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu(); + +void BM_spin_pause_during(benchmark::State& state) { + for (auto _ : state) { + state.PauseTiming(); + for (int i = 0; i < state.range(0); ++i) { + benchmark::DoNotOptimize(i); + } + state.ResumeTiming(); + for (int i = 0; i < state.range(0); ++i) { + benchmark::DoNotOptimize(i); + } + } +} +BASIC_BENCHMARK_TEST(BM_spin_pause_during); +BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu(); + +void BM_pause_during(benchmark::State& state) { + for (auto _ : state) { + state.PauseTiming(); + state.ResumeTiming(); + } +} +BENCHMARK(BM_pause_during); +BENCHMARK(BM_pause_during)->ThreadPerCpu(); +BENCHMARK(BM_pause_during)->UseRealTime(); +BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu(); + +void BM_spin_pause_after(benchmark::State& state) { + for (auto _ : state) { + for (int i = 0; i < state.range(0); ++i) { + benchmark::DoNotOptimize(i); + } + } + for (int i = 0; i < state.range(0); ++i) { + benchmark::DoNotOptimize(i); + } +} +BASIC_BENCHMARK_TEST(BM_spin_pause_after); +BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu(); + +void BM_spin_pause_before_and_after(benchmark::State& state) { + for (int i = 0; i < state.range(0); ++i) { + benchmark::DoNotOptimize(i); + } + for (auto _ : state) { + for (int i = 0; i < state.range(0); ++i) { + benchmark::DoNotOptimize(i); + } + } + for (int i = 0; i < state.range(0); ++i) { + benchmark::DoNotOptimize(i); + } +} +BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after); 
+BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu(); + +void BM_empty_stop_start(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_empty_stop_start); +BENCHMARK(BM_empty_stop_start)->ThreadPerCpu(); + + +void BM_KeepRunning(benchmark::State& state) { + benchmark::IterationCount iter_count = 0; + assert(iter_count == state.iterations()); + while (state.KeepRunning()) { + ++iter_count; + } + assert(iter_count == state.iterations()); +} +BENCHMARK(BM_KeepRunning); + +void BM_KeepRunningBatch(benchmark::State& state) { + // Choose a prime batch size to avoid evenly dividing max_iterations. + const benchmark::IterationCount batch_size = 101; + benchmark::IterationCount iter_count = 0; + while (state.KeepRunningBatch(batch_size)) { + iter_count += batch_size; + } + assert(state.iterations() == iter_count); +} +BENCHMARK(BM_KeepRunningBatch); + +void BM_RangedFor(benchmark::State& state) { + benchmark::IterationCount iter_count = 0; + for (auto _ : state) { + ++iter_count; + } + assert(iter_count == state.max_iterations); +} +BENCHMARK(BM_RangedFor); + +// Ensure that StateIterator provides all the necessary typedefs required to +// instantiate std::iterator_traits. +static_assert(std::is_same< + typename std::iterator_traits::value_type, + typename benchmark::State::StateIterator::value_type>::value, ""); + +BENCHMARK_MAIN(); diff --git a/benchmarks/thirdparty/benchmark/test/benchmark_gtest.cc b/benchmarks/thirdparty/benchmark/test/benchmark_gtest.cc new file mode 100755 index 0000000000..9557b20ec7 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/test/benchmark_gtest.cc @@ -0,0 +1,128 @@ +#include + +#include "../src/benchmark_register.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace benchmark { +namespace internal { +namespace { + +TEST(AddRangeTest, Simple) { + std::vector dst; + AddRange(&dst, 1, 2, 2); + EXPECT_THAT(dst, testing::ElementsAre(1, 2)); +} + +TEST(AddRangeTest, Simple64) { + std::vector dst; + AddRange(&dst, static_cast(1), static_cast(2), 2); + EXPECT_THAT(dst, testing::ElementsAre(1, 2)); +} + +TEST(AddRangeTest, Advanced) { + std::vector dst; + AddRange(&dst, 5, 15, 2); + EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15)); +} + +TEST(AddRangeTest, Advanced64) { + std::vector dst; + AddRange(&dst, static_cast(5), static_cast(15), 2); + EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15)); +} + +TEST(AddRangeTest, FullRange8) { + std::vector dst; + AddRange(&dst, int8_t{1}, std::numeric_limits::max(), 8); + EXPECT_THAT(dst, testing::ElementsAre(1, 8, 64, 127)); +} + +TEST(AddRangeTest, FullRange64) { + std::vector dst; + AddRange(&dst, int64_t{1}, std::numeric_limits::max(), 1024); + EXPECT_THAT( + dst, testing::ElementsAre(1LL, 1024LL, 1048576LL, 1073741824LL, + 1099511627776LL, 1125899906842624LL, + 1152921504606846976LL, 9223372036854775807LL)); +} + +TEST(AddRangeTest, NegativeRanges) { + std::vector dst; + AddRange(&dst, -8, 0, 2); + EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0)); +} + +TEST(AddRangeTest, StrictlyNegative) { + std::vector dst; + AddRange(&dst, -8, -1, 2); + EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1)); +} + +TEST(AddRangeTest, SymmetricNegativeRanges) { + std::vector dst; + AddRange(&dst, -8, 8, 2); + EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0, 1, 2, 4, 8)); +} + +TEST(AddRangeTest, SymmetricNegativeRangesOddMult) { + std::vector dst; + AddRange(&dst, -30, 32, 5); + EXPECT_THAT(dst, testing::ElementsAre(-30, -25, -5, -1, 0, 1, 5, 25, 32)); +} + +TEST(AddRangeTest, 
NegativeRangesAsymmetric) { + std::vector dst; + AddRange(&dst, -3, 5, 2); + EXPECT_THAT(dst, testing::ElementsAre(-3, -2, -1, 0, 1, 2, 4, 5)); +} + +TEST(AddRangeTest, NegativeRangesLargeStep) { + // Always include -1, 0, 1 when crossing zero. + std::vector dst; + AddRange(&dst, -8, 8, 10); + EXPECT_THAT(dst, testing::ElementsAre(-8, -1, 0, 1, 8)); +} + +TEST(AddRangeTest, ZeroOnlyRange) { + std::vector dst; + AddRange(&dst, 0, 0, 2); + EXPECT_THAT(dst, testing::ElementsAre(0)); +} + +TEST(AddRangeTest, NegativeRange64) { + std::vector dst; + AddRange(&dst, -4, 4, 2); + EXPECT_THAT(dst, testing::ElementsAre(-4, -2, -1, 0, 1, 2, 4)); +} + +TEST(AddRangeTest, NegativeRangePreservesExistingOrder) { + // If elements already exist in the range, ensure we don't change + // their ordering by adding negative values. + std::vector dst = {1, 2, 3}; + AddRange(&dst, -2, 2, 2); + EXPECT_THAT(dst, testing::ElementsAre(1, 2, 3, -2, -1, 0, 1, 2)); +} + +TEST(AddRangeTest, FullNegativeRange64) { + std::vector dst; + const auto min = std::numeric_limits::min(); + const auto max = std::numeric_limits::max(); + AddRange(&dst, min, max, 1024); + EXPECT_THAT( + dst, testing::ElementsAreArray(std::vector{ + min, -1152921504606846976LL, -1125899906842624LL, + -1099511627776LL, -1073741824LL, -1048576LL, -1024LL, -1LL, 0LL, + 1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL, + 1125899906842624LL, 1152921504606846976LL, max})); +} + +TEST(AddRangeTest, Simple8) { + std::vector dst; + AddRange(&dst, 1, 8, 2); + EXPECT_THAT(dst, testing::ElementsAre(1, 2, 4, 8)); +} + +} // namespace +} // namespace internal +} // namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/test/benchmark_name_gtest.cc b/benchmarks/thirdparty/benchmark/test/benchmark_name_gtest.cc new file mode 100755 index 0000000000..afb401c1f5 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/test/benchmark_name_gtest.cc @@ -0,0 +1,74 @@ +#include "benchmark/benchmark.h" +#include "gtest/gtest.h" + +namespace { + +using namespace benchmark; +using namespace benchmark::internal; + +TEST(BenchmarkNameTest, Empty) { + const auto name = BenchmarkName(); + EXPECT_EQ(name.str(), std::string()); +} + +TEST(BenchmarkNameTest, FunctionName) { + auto name = BenchmarkName(); + name.function_name = "function_name"; + EXPECT_EQ(name.str(), "function_name"); +} + +TEST(BenchmarkNameTest, FunctionNameAndArgs) { + auto name = BenchmarkName(); + name.function_name = "function_name"; + name.args = "some_args:3/4/5"; + EXPECT_EQ(name.str(), "function_name/some_args:3/4/5"); +} + +TEST(BenchmarkNameTest, MinTime) { + auto name = BenchmarkName(); + name.function_name = "function_name"; + name.args = "some_args:3/4"; + name.min_time = "min_time:3.4s"; + EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_time:3.4s"); +} + +TEST(BenchmarkNameTest, Iterations) { + auto name = BenchmarkName(); + name.function_name = "function_name"; + name.min_time = "min_time:3.4s"; + name.iterations = "iterations:42"; + EXPECT_EQ(name.str(), "function_name/min_time:3.4s/iterations:42"); +} + +TEST(BenchmarkNameTest, Repetitions) { + auto name = BenchmarkName(); + name.function_name = "function_name"; + name.min_time = "min_time:3.4s"; + name.repetitions = "repetitions:24"; + EXPECT_EQ(name.str(), "function_name/min_time:3.4s/repetitions:24"); +} + +TEST(BenchmarkNameTest, TimeType) { + auto name = BenchmarkName(); + name.function_name = "function_name"; + name.min_time = "min_time:3.4s"; + name.time_type = "hammer_time"; + EXPECT_EQ(name.str(), 
"function_name/min_time:3.4s/hammer_time"); +} + +TEST(BenchmarkNameTest, Threads) { + auto name = BenchmarkName(); + name.function_name = "function_name"; + name.min_time = "min_time:3.4s"; + name.threads = "threads:256"; + EXPECT_EQ(name.str(), "function_name/min_time:3.4s/threads:256"); +} + +TEST(BenchmarkNameTest, TestEmptyFunctionName) { + auto name = BenchmarkName(); + name.args = "first:3/second:4"; + name.threads = "threads:22"; + EXPECT_EQ(name.str(), "first:3/second:4/threads:22"); +} + +} // end namespace diff --git a/benchmarks/thirdparty/benchmark/test/benchmark_test.cc b/benchmarks/thirdparty/benchmark/test/benchmark_test.cc new file mode 100755 index 0000000000..3cd4f5565f --- /dev/null +++ b/benchmarks/thirdparty/benchmark/test/benchmark_test.cc @@ -0,0 +1,245 @@ +#include "benchmark/benchmark.h" + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(__GNUC__) +#define BENCHMARK_NOINLINE __attribute__((noinline)) +#else +#define BENCHMARK_NOINLINE +#endif + +namespace { + +int BENCHMARK_NOINLINE Factorial(uint32_t n) { + return (n == 1) ? 1 : n * Factorial(n - 1); +} + +double CalculatePi(int depth) { + double pi = 0.0; + for (int i = 0; i < depth; ++i) { + double numerator = static_cast(((i % 2) * 2) - 1); + double denominator = static_cast((2 * i) - 1); + pi += numerator / denominator; + } + return (pi - 1.0) * 4; +} + +std::set ConstructRandomSet(int64_t size) { + std::set s; + for (int i = 0; i < size; ++i) s.insert(s.end(), i); + return s; +} + +std::mutex test_vector_mu; +std::vector* test_vector = nullptr; + +} // end namespace + +static void BM_Factorial(benchmark::State& state) { + int fac_42 = 0; + for (auto _ : state) fac_42 = Factorial(8); + // Prevent compiler optimizations + std::stringstream ss; + ss << fac_42; + state.SetLabel(ss.str()); +} +BENCHMARK(BM_Factorial); +BENCHMARK(BM_Factorial)->UseRealTime(); + +static void BM_CalculatePiRange(benchmark::State& state) { + double pi = 0.0; + for (auto _ : state) pi = CalculatePi(static_cast(state.range(0))); + std::stringstream ss; + ss << pi; + state.SetLabel(ss.str()); +} +BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024); + +static void BM_CalculatePi(benchmark::State& state) { + static const int depth = 1024; + for (auto _ : state) { + benchmark::DoNotOptimize(CalculatePi(static_cast(depth))); + } +} +BENCHMARK(BM_CalculatePi)->Threads(8); +BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32); +BENCHMARK(BM_CalculatePi)->ThreadPerCpu(); + +static void BM_SetInsert(benchmark::State& state) { + std::set data; + for (auto _ : state) { + state.PauseTiming(); + data = ConstructRandomSet(state.range(0)); + state.ResumeTiming(); + for (int j = 0; j < state.range(1); ++j) data.insert(rand()); + } + state.SetItemsProcessed(state.iterations() * state.range(1)); + state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int)); +} + +// Test many inserts at once to reduce the total iterations needed. Otherwise, the slower, +// non-timed part of each iteration will make the benchmark take forever. 
+BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {128, 512}}); + +template +static void BM_Sequential(benchmark::State& state) { + ValueType v = 42; + for (auto _ : state) { + Container c; + for (int64_t i = state.range(0); --i;) c.push_back(v); + } + const int64_t items_processed = state.iterations() * state.range(0); + state.SetItemsProcessed(items_processed); + state.SetBytesProcessed(items_processed * sizeof(v)); +} +BENCHMARK_TEMPLATE2(BM_Sequential, std::vector, int) + ->Range(1 << 0, 1 << 10); +BENCHMARK_TEMPLATE(BM_Sequential, std::list)->Range(1 << 0, 1 << 10); +// Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond. +#ifdef BENCHMARK_HAS_CXX11 +BENCHMARK_TEMPLATE(BM_Sequential, std::vector, int)->Arg(512); +#endif + +static void BM_StringCompare(benchmark::State& state) { + size_t len = static_cast(state.range(0)); + std::string s1(len, '-'); + std::string s2(len, '-'); + for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2)); +} +BENCHMARK(BM_StringCompare)->Range(1, 1 << 20); + +static void BM_SetupTeardown(benchmark::State& state) { + if (state.thread_index == 0) { + // No need to lock test_vector_mu here as this is running single-threaded. + test_vector = new std::vector(); + } + int i = 0; + for (auto _ : state) { + std::lock_guard l(test_vector_mu); + if (i % 2 == 0) + test_vector->push_back(i); + else + test_vector->pop_back(); + ++i; + } + if (state.thread_index == 0) { + delete test_vector; + } +} +BENCHMARK(BM_SetupTeardown)->ThreadPerCpu(); + +static void BM_LongTest(benchmark::State& state) { + double tracker = 0.0; + for (auto _ : state) { + for (int i = 0; i < state.range(0); ++i) + benchmark::DoNotOptimize(tracker += i); + } +} +BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28); + +static void BM_ParallelMemset(benchmark::State& state) { + int64_t size = state.range(0) / static_cast(sizeof(int)); + int thread_size = static_cast(size) / state.threads; + int from = thread_size * state.thread_index; + int to = from + thread_size; + + if (state.thread_index == 0) { + test_vector = new std::vector(static_cast(size)); + } + + for (auto _ : state) { + for (int i = from; i < to; i++) { + // No need to lock test_vector_mu as ranges + // do not overlap between threads. + benchmark::DoNotOptimize(test_vector->at(i) = 1); + } + } + + if (state.thread_index == 0) { + delete test_vector; + } +} +BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4); + +static void BM_ManualTiming(benchmark::State& state) { + int64_t slept_for = 0; + int64_t microseconds = state.range(0); + std::chrono::duration sleep_duration{ + static_cast(microseconds)}; + + for (auto _ : state) { + auto start = std::chrono::high_resolution_clock::now(); + // Simulate some useful workload with a sleep + std::this_thread::sleep_for( + std::chrono::duration_cast(sleep_duration)); + auto end = std::chrono::high_resolution_clock::now(); + + auto elapsed = + std::chrono::duration_cast>(end - start); + + state.SetIterationTime(elapsed.count()); + slept_for += microseconds; + } + state.SetItemsProcessed(slept_for); +} +BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseRealTime(); +BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime(); + +#ifdef BENCHMARK_HAS_CXX11 + +template +void BM_with_args(benchmark::State& state, Args&&...) 
{ + for (auto _ : state) { + } +} +BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44); +BENCHMARK_CAPTURE(BM_with_args, string_and_pair_test, std::string("abc"), + std::pair(42, 3.8)); + +void BM_non_template_args(benchmark::State& state, int, double) { + while(state.KeepRunning()) {} +} +BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0); + +#endif // BENCHMARK_HAS_CXX11 + +static void BM_DenseThreadRanges(benchmark::State& st) { + switch (st.range(0)) { + case 1: + assert(st.threads == 1 || st.threads == 2 || st.threads == 3); + break; + case 2: + assert(st.threads == 1 || st.threads == 3 || st.threads == 4); + break; + case 3: + assert(st.threads == 5 || st.threads == 8 || st.threads == 11 || + st.threads == 14); + break; + default: + assert(false && "Invalid test case number"); + } + while (st.KeepRunning()) { + } +} +BENCHMARK(BM_DenseThreadRanges)->Arg(1)->DenseThreadRange(1, 3); +BENCHMARK(BM_DenseThreadRanges)->Arg(2)->DenseThreadRange(1, 4, 2); +BENCHMARK(BM_DenseThreadRanges)->Arg(3)->DenseThreadRange(5, 14, 3); + +BENCHMARK_MAIN(); diff --git a/benchmarks/thirdparty/benchmark/test/clobber_memory_assembly_test.cc b/benchmarks/thirdparty/benchmark/test/clobber_memory_assembly_test.cc new file mode 100755 index 0000000000..f41911a39c --- /dev/null +++ b/benchmarks/thirdparty/benchmark/test/clobber_memory_assembly_test.cc @@ -0,0 +1,64 @@ +#include + +#ifdef __clang__ +#pragma clang diagnostic ignored "-Wreturn-type" +#endif + +extern "C" { + +extern int ExternInt; +extern int ExternInt2; +extern int ExternInt3; + +} + +// CHECK-LABEL: test_basic: +extern "C" void test_basic() { + int x; + benchmark::DoNotOptimize(&x); + x = 101; + benchmark::ClobberMemory(); + // CHECK: leaq [[DEST:[^,]+]], %rax + // CHECK: movl $101, [[DEST]] + // CHECK: ret +} + +// CHECK-LABEL: test_redundant_store: +extern "C" void test_redundant_store() { + ExternInt = 3; + benchmark::ClobberMemory(); + ExternInt = 51; + // CHECK-DAG: ExternInt + // CHECK-DAG: movl $3 + // CHECK: movl $51 +} + +// CHECK-LABEL: test_redundant_read: +extern "C" void test_redundant_read() { + int x; + benchmark::DoNotOptimize(&x); + x = ExternInt; + benchmark::ClobberMemory(); + x = ExternInt2; + // CHECK: leaq [[DEST:[^,]+]], %rax + // CHECK: ExternInt(%rip) + // CHECK: movl %eax, [[DEST]] + // CHECK-NOT: ExternInt2 + // CHECK: ret +} + +// CHECK-LABEL: test_redundant_read2: +extern "C" void test_redundant_read2() { + int x; + benchmark::DoNotOptimize(&x); + x = ExternInt; + benchmark::ClobberMemory(); + x = ExternInt2; + benchmark::ClobberMemory(); + // CHECK: leaq [[DEST:[^,]+]], %rax + // CHECK: ExternInt(%rip) + // CHECK: movl %eax, [[DEST]] + // CHECK: ExternInt2(%rip) + // CHECK: movl %eax, [[DEST]] + // CHECK: ret +} diff --git a/benchmarks/thirdparty/benchmark/test/commandlineflags_gtest.cc b/benchmarks/thirdparty/benchmark/test/commandlineflags_gtest.cc new file mode 100755 index 0000000000..656020f2ec --- /dev/null +++ b/benchmarks/thirdparty/benchmark/test/commandlineflags_gtest.cc @@ -0,0 +1,201 @@ +#include + +#include "../src/commandlineflags.h" +#include "../src/internal_macros.h" +#include "gtest/gtest.h" + +namespace benchmark { +namespace { + +#if defined(BENCHMARK_OS_WINDOWS) +int setenv(const char* name, const char* value, int overwrite) { + if (!overwrite) { + // NOTE: getenv_s is far superior but not available under mingw. 
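+    // All callers in this file pass overwrite == 1, so this probe is only
+    // reached in the overwrite == 0 case, where the shim (unlike POSIX
+    // setenv, which would create a missing variable) reports failure when
+    // the name is absent.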
+ char* env_value = getenv(name); + if (env_value == nullptr) { + return -1; + } + } + return _putenv_s(name, value); +} + +int unsetenv(const char* name) { + return _putenv_s(name, ""); +} + +#endif // BENCHMARK_OS_WINDOWS + +TEST(BoolFromEnv, Default) { + ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0); + EXPECT_EQ(BoolFromEnv("not_in_env", true), true); +} + +TEST(BoolFromEnv, False) { + ASSERT_EQ(setenv("IN_ENV", "0", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "N", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "n", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "NO", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "No", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "no", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "F", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "f", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "FALSE", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "False", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "false", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "OFF", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "Off", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "off", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); +} + +TEST(BoolFromEnv, True) { + ASSERT_EQ(setenv("IN_ENV", "1", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "Y", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "y", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "YES", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "Yes", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "yes", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "T", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "t", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "TRUE", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "True", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "true", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "ON", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "On", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + 
+ ASSERT_EQ(setenv("IN_ENV", "on", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + +#ifndef BENCHMARK_OS_WINDOWS + ASSERT_EQ(setenv("IN_ENV", "", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); +#endif +} + +TEST(Int32FromEnv, NotInEnv) { + ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0); + EXPECT_EQ(Int32FromEnv("not_in_env", 42), 42); +} + +TEST(Int32FromEnv, InvalidInteger) { + ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0); + EXPECT_EQ(Int32FromEnv("in_env", 42), 42); + unsetenv("IN_ENV"); +} + +TEST(Int32FromEnv, ValidInteger) { + ASSERT_EQ(setenv("IN_ENV", "42", 1), 0); + EXPECT_EQ(Int32FromEnv("in_env", 64), 42); + unsetenv("IN_ENV"); +} + +TEST(DoubleFromEnv, NotInEnv) { + ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0); + EXPECT_EQ(DoubleFromEnv("not_in_env", 0.51), 0.51); +} + +TEST(DoubleFromEnv, InvalidReal) { + ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0); + EXPECT_EQ(DoubleFromEnv("in_env", 0.51), 0.51); + unsetenv("IN_ENV"); +} + +TEST(DoubleFromEnv, ValidReal) { + ASSERT_EQ(setenv("IN_ENV", "0.51", 1), 0); + EXPECT_EQ(DoubleFromEnv("in_env", 0.71), 0.51); + unsetenv("IN_ENV"); +} + +TEST(StringFromEnv, Default) { + ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0); + EXPECT_STREQ(StringFromEnv("not_in_env", "foo"), "foo"); +} + +TEST(StringFromEnv, Valid) { + ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0); + EXPECT_STREQ(StringFromEnv("in_env", "bar"), "foo"); + unsetenv("IN_ENV"); +} + +} // namespace +} // namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/test/complexity_test.cc b/benchmarks/thirdparty/benchmark/test/complexity_test.cc new file mode 100755 index 0000000000..5681fdcf34 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/test/complexity_test.cc @@ -0,0 +1,213 @@ +#undef NDEBUG +#include +#include +#include +#include +#include +#include "benchmark/benchmark.h" +#include "output_test.h" + +namespace { + +#define ADD_COMPLEXITY_CASES(...) \ + int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__) + +int AddComplexityTest(std::string test_name, std::string big_o_test_name, + std::string rms_test_name, std::string big_o) { + SetSubstitutions({{"%name", test_name}, + {"%bigo_name", big_o_test_name}, + {"%rms_name", rms_test_name}, + {"%bigo_str", "[ ]* %float " + big_o}, + {"%bigo", big_o}, + {"%rms", "[ ]*[0-9]+ %"}}); + AddCases( + TC_ConsoleOut, + {{"^%bigo_name %bigo_str %bigo_str[ ]*$"}, + {"^%bigo_name", MR_Not}, // Assert we we didn't only matched a name. 
+ {"^%rms_name %rms %rms[ ]*$", MR_Next}}); + AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"}, + {"\"run_name\": \"%name\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": %int,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"BigO\",$", MR_Next}, + {"\"cpu_coefficient\": %float,$", MR_Next}, + {"\"real_coefficient\": %float,$", MR_Next}, + {"\"big_o\": \"%bigo\",$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}, + {"\"name\": \"%rms_name\",$"}, + {"\"run_name\": \"%name\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": %int,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"RMS\",$", MR_Next}, + {"\"rms\": %float$", MR_Next}, + {"}", MR_Next}}); + AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"}, + {"^\"%bigo_name\"", MR_Not}, + {"^\"%rms_name\",,%float,%float,,,,,,$", MR_Next}}); + return 0; +} + +} // end namespace + +// ========================================================================= // +// --------------------------- Testing BigO O(1) --------------------------- // +// ========================================================================= // + +void BM_Complexity_O1(benchmark::State& state) { + for (auto _ : state) { + for (int i = 0; i < 1024; ++i) { + benchmark::DoNotOptimize(&i); + } + } + state.SetComplexityN(state.range(0)); +} +BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1); +BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(); +BENCHMARK(BM_Complexity_O1) + ->Range(1, 1 << 18) + ->Complexity([](benchmark::IterationCount) { return 1.0; }); + +const char *one_test_name = "BM_Complexity_O1"; +const char *big_o_1_test_name = "BM_Complexity_O1_BigO"; +const char *rms_o_1_test_name = "BM_Complexity_O1_RMS"; +const char *enum_big_o_1 = "\\([0-9]+\\)"; +// FIXME: Tolerate both '(1)' and 'lgN' as output when the complexity is auto +// deduced. 
+// See https://github.com/google/benchmark/issues/272 +const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)"; +const char *lambda_big_o_1 = "f\\(N\\)"; + +// Add enum tests +ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, + enum_big_o_1); + +// Add auto enum tests +ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, + auto_big_o_1); + +// Add lambda tests +ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, + lambda_big_o_1); + +// ========================================================================= // +// --------------------------- Testing BigO O(N) --------------------------- // +// ========================================================================= // + +std::vector ConstructRandomVector(int64_t size) { + std::vector v; + v.reserve(static_cast(size)); + for (int i = 0; i < size; ++i) { + v.push_back(static_cast(std::rand() % size)); + } + return v; +} + +void BM_Complexity_O_N(benchmark::State& state) { + auto v = ConstructRandomVector(state.range(0)); + // Test worst case scenario (item not in vector) + const int64_t item_not_in_vector = state.range(0) * 2; + for (auto _ : state) { + benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector)); + } + state.SetComplexityN(state.range(0)); +} +BENCHMARK(BM_Complexity_O_N) + ->RangeMultiplier(2) + ->Range(1 << 10, 1 << 16) + ->Complexity(benchmark::oN); +BENCHMARK(BM_Complexity_O_N) + ->RangeMultiplier(2) + ->Range(1 << 10, 1 << 16) + ->Complexity([](benchmark::IterationCount n) -> double { + return static_cast(n); + }); +BENCHMARK(BM_Complexity_O_N) + ->RangeMultiplier(2) + ->Range(1 << 10, 1 << 16) + ->Complexity(); + +const char *n_test_name = "BM_Complexity_O_N"; +const char *big_o_n_test_name = "BM_Complexity_O_N_BigO"; +const char *rms_o_n_test_name = "BM_Complexity_O_N_RMS"; +const char *enum_auto_big_o_n = "N"; +const char *lambda_big_o_n = "f\\(N\\)"; + +// Add enum tests +ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, + enum_auto_big_o_n); + +// Add lambda tests +ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, + lambda_big_o_n); + +// ========================================================================= // +// ------------------------- Testing BigO O(N*lgN) ------------------------- // +// ========================================================================= // + +static void BM_Complexity_O_N_log_N(benchmark::State& state) { + auto v = ConstructRandomVector(state.range(0)); + for (auto _ : state) { + std::sort(v.begin(), v.end()); + } + state.SetComplexityN(state.range(0)); +} +static const double kLog2E = 1.44269504088896340736; +BENCHMARK(BM_Complexity_O_N_log_N) + ->RangeMultiplier(2) + ->Range(1 << 10, 1 << 16) + ->Complexity(benchmark::oNLogN); +BENCHMARK(BM_Complexity_O_N_log_N) + ->RangeMultiplier(2) + ->Range(1 << 10, 1 << 16) + ->Complexity([](benchmark::IterationCount n) { + return kLog2E * n * log(static_cast(n)); + }); +BENCHMARK(BM_Complexity_O_N_log_N) + ->RangeMultiplier(2) + ->Range(1 << 10, 1 << 16) + ->Complexity(); + +const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N"; +const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO"; +const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS"; +const char *enum_auto_big_o_n_lg_n = "NlgN"; +const char *lambda_big_o_n_lg_n = "f\\(N\\)"; + +// Add enum tests +ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, + rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n); + +// Add lambda tests 
+ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
+                     rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n);
+
+// ========================================================================= //
+// -------- Testing formatting of Complexity with captured args ------------ //
+// ========================================================================= //
+
+void BM_ComplexityCaptureArgs(benchmark::State& state, int n) {
+  for (auto _ : state) {
+    // This test requires a non-zero CPU time to avoid divide-by-zero
+    benchmark::DoNotOptimize(state.iterations());
+  }
+  state.SetComplexityN(n);
+}
+
+BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100)
+    ->Complexity(benchmark::oN)
+    ->Ranges({{1, 2}, {3, 4}});
+
+const std::string complexity_capture_name =
+    "BM_ComplexityCaptureArgs/capture_test";
+
+ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO",
+                     complexity_capture_name + "_RMS", "N");
+
+// ========================================================================= //
+// --------------------------- TEST CASES END ------------------------------ //
+// ========================================================================= //
+
+int main(int argc, char *argv[]) { RunOutputTests(argc, argv); }
diff --git a/benchmarks/thirdparty/benchmark/test/cxx03_test.cc b/benchmarks/thirdparty/benchmark/test/cxx03_test.cc
new file mode 100755
index 0000000000..c4c9a52273
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/cxx03_test.cc
@@ -0,0 +1,63 @@
+#undef NDEBUG
+#include <assert.h>
+#include <cstddef>
+
+#include "benchmark/benchmark.h"
+
+#if __cplusplus >= 201103L
+#error C++11 or greater detected. Should be C++03.
+#endif
+
+#ifdef BENCHMARK_HAS_CXX11
+#error C++11 or greater detected by the library. BENCHMARK_HAS_CXX11 is defined.
+#endif
+
+void BM_empty(benchmark::State& state) {
+  while (state.KeepRunning()) {
+    volatile benchmark::IterationCount x = state.iterations();
+    ((void)x);
+  }
+}
+BENCHMARK(BM_empty);
+
+// The new C++11 interface for args/ranges requires initializer list support.
+// Therefore we provide the old interface to support C++03.
+void BM_old_arg_range_interface(benchmark::State& state) {
+  assert((state.range(0) == 1 && state.range(1) == 2) ||
+         (state.range(0) == 5 && state.range(1) == 6));
+  while (state.KeepRunning()) {
+  }
+}
+BENCHMARK(BM_old_arg_range_interface)->ArgPair(1, 2)->RangePair(5, 5, 6, 6);
+
+template <class T, class U>
+void BM_template2(benchmark::State& state) {
+  BM_empty(state);
+}
+BENCHMARK_TEMPLATE2(BM_template2, int, long);
+
+template <class T>
+void BM_template1(benchmark::State& state) {
+  BM_empty(state);
+}
+BENCHMARK_TEMPLATE(BM_template1, long);
+BENCHMARK_TEMPLATE1(BM_template1, int);
+
+template <class T>
+struct BM_Fixture : public ::benchmark::Fixture {
+};
+
+BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State& state) {
+  BM_empty(state);
+}
+BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State& state) {
+  BM_empty(state);
+}
+
+void BM_counters(benchmark::State& state) {
+  BM_empty(state);
+  state.counters["Foo"] = 2;
+}
+BENCHMARK(BM_counters);
+
+BENCHMARK_MAIN();
diff --git a/benchmarks/thirdparty/benchmark/test/diagnostics_test.cc b/benchmarks/thirdparty/benchmark/test/diagnostics_test.cc
new file mode 100755
index 0000000000..dd64a33655
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/diagnostics_test.cc
@@ -0,0 +1,80 @@
+// Testing:
+//   State::PauseTiming()
+//   State::ResumeTiming()
+// Test that CHECKs within these functions diagnose when they are called
+// outside of the KeepRunning() loop.
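+//
+// The mechanism: main() installs TestHandler() as the library's abort
+// handler, so a failed CHECK inside PauseTiming()/ResumeTiming() throws
+// std::logic_error (or calls std::abort() where exceptions are unavailable),
+// which try_invalid_pause_resume() below catches as the expected outcome.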
+//
+// NOTE: Users should NOT include or use src/check.h. This is only done in
+// order to test library internals.
+
+#include <cstdlib>
+#include <stdexcept>
+
+#include "../src/check.h"
+#include "benchmark/benchmark.h"
+
+#if defined(__GNUC__) && !defined(__EXCEPTIONS)
+#define TEST_HAS_NO_EXCEPTIONS
+#endif
+
+void TestHandler() {
+#ifndef TEST_HAS_NO_EXCEPTIONS
+  throw std::logic_error("");
+#else
+  std::abort();
+#endif
+}
+
+void try_invalid_pause_resume(benchmark::State& state) {
+#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS)
+  try {
+    state.PauseTiming();
+    std::abort();
+  } catch (std::logic_error const&) {
+  }
+  try {
+    state.ResumeTiming();
+    std::abort();
+  } catch (std::logic_error const&) {
+  }
+#else
+  (void)state;  // avoid unused warning
+#endif
+}
+
+void BM_diagnostic_test(benchmark::State& state) {
+  static bool called_once = false;
+
+  if (called_once == false) try_invalid_pause_resume(state);
+
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(state.iterations());
+  }
+
+  if (called_once == false) try_invalid_pause_resume(state);
+
+  called_once = true;
+}
+BENCHMARK(BM_diagnostic_test);
+
+
+void BM_diagnostic_test_keep_running(benchmark::State& state) {
+  static bool called_once = false;
+
+  if (called_once == false) try_invalid_pause_resume(state);
+
+  while(state.KeepRunning()) {
+    benchmark::DoNotOptimize(state.iterations());
+  }
+
+  if (called_once == false) try_invalid_pause_resume(state);
+
+  called_once = true;
+}
+BENCHMARK(BM_diagnostic_test_keep_running);
+
+int main(int argc, char* argv[]) {
+  benchmark::internal::GetAbortHandler() = &TestHandler;
+  benchmark::Initialize(&argc, argv);
+  benchmark::RunSpecifiedBenchmarks();
+}
diff --git a/benchmarks/thirdparty/benchmark/test/display_aggregates_only_test.cc b/benchmarks/thirdparty/benchmark/test/display_aggregates_only_test.cc
new file mode 100755
index 0000000000..3c36d3f03c
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/display_aggregates_only_test.cc
@@ -0,0 +1,43 @@
+
+#undef NDEBUG
+#include <cstdio>
+#include <string>
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// Ok this test is super ugly. We want to check what happens with the file
+// reporter in the presence of DisplayAggregatesOnly().
+// We do not care about console output, the normal tests check that already.
+
+void BM_SummaryRepeat(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+}
+BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->DisplayAggregatesOnly();
+
+int main(int argc, char* argv[]) {
+  const std::string output = GetFileReporterOutput(argc, argv);
+
+  if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 6 ||
+      SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3\"") != 3 ||
+      SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
+      SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") !=
+          1 ||
+      SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") !=
+          1) {
+    std::cout << "Precondition mismatch. 
Expected to only find 6 " + "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n" + "\"name\": \"BM_SummaryRepeat/repeats:3\", " + "\"name\": \"BM_SummaryRepeat/repeats:3\", " + "\"name\": \"BM_SummaryRepeat/repeats:3\", " + "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", " + "\"name\": \"BM_SummaryRepeat/repeats:3_median\", " + "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire " + "output:\n"; + std::cout << output; + return 1; + } + + return 0; +} diff --git a/benchmarks/thirdparty/benchmark/test/donotoptimize_assembly_test.cc b/benchmarks/thirdparty/benchmark/test/donotoptimize_assembly_test.cc new file mode 100755 index 0000000000..d4b0bab70e --- /dev/null +++ b/benchmarks/thirdparty/benchmark/test/donotoptimize_assembly_test.cc @@ -0,0 +1,163 @@ +#include + +#ifdef __clang__ +#pragma clang diagnostic ignored "-Wreturn-type" +#endif + +extern "C" { + +extern int ExternInt; +extern int ExternInt2; +extern int ExternInt3; + +inline int Add42(int x) { return x + 42; } + +struct NotTriviallyCopyable { + NotTriviallyCopyable(); + explicit NotTriviallyCopyable(int x) : value(x) {} + NotTriviallyCopyable(NotTriviallyCopyable const&); + int value; +}; + +struct Large { + int value; + int data[2]; +}; + +} +// CHECK-LABEL: test_with_rvalue: +extern "C" void test_with_rvalue() { + benchmark::DoNotOptimize(Add42(0)); + // CHECK: movl $42, %eax + // CHECK: ret +} + +// CHECK-LABEL: test_with_large_rvalue: +extern "C" void test_with_large_rvalue() { + benchmark::DoNotOptimize(Large{ExternInt, {ExternInt, ExternInt}}); + // CHECK: ExternInt(%rip) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]] + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) + // CHECK: ret +} + +// CHECK-LABEL: test_with_non_trivial_rvalue: +extern "C" void test_with_non_trivial_rvalue() { + benchmark::DoNotOptimize(NotTriviallyCopyable(ExternInt)); + // CHECK: mov{{l|q}} ExternInt(%rip) + // CHECK: ret +} + +// CHECK-LABEL: test_with_lvalue: +extern "C" void test_with_lvalue() { + int x = 101; + benchmark::DoNotOptimize(x); + // CHECK-GNU: movl $101, %eax + // CHECK-CLANG: movl $101, -{{[0-9]+}}(%[[REG:[a-z]+]]) + // CHECK: ret +} + +// CHECK-LABEL: test_with_large_lvalue: +extern "C" void test_with_large_lvalue() { + Large L{ExternInt, {ExternInt, ExternInt}}; + benchmark::DoNotOptimize(L); + // CHECK: ExternInt(%rip) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) + // CHECK: ret +} + +// CHECK-LABEL: test_with_non_trivial_lvalue: +extern "C" void test_with_non_trivial_lvalue() { + NotTriviallyCopyable NTC(ExternInt); + benchmark::DoNotOptimize(NTC); + // CHECK: ExternInt(%rip) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]) + // CHECK: ret +} + +// CHECK-LABEL: test_with_const_lvalue: +extern "C" void test_with_const_lvalue() { + const int x = 123; + benchmark::DoNotOptimize(x); + // CHECK: movl $123, %eax + // CHECK: ret +} + +// CHECK-LABEL: test_with_large_const_lvalue: +extern "C" void test_with_large_const_lvalue() { + const Large L{ExternInt, {ExternInt, ExternInt}}; + benchmark::DoNotOptimize(L); + // CHECK: ExternInt(%rip) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) + // CHECK: ret +} + +// CHECK-LABEL: test_with_non_trivial_const_lvalue: +extern "C" void test_with_non_trivial_const_lvalue() { + const NotTriviallyCopyable Obj(ExternInt); + 
benchmark::DoNotOptimize(Obj); + // CHECK: mov{{q|l}} ExternInt(%rip) + // CHECK: ret +} + +// CHECK-LABEL: test_div_by_two: +extern "C" int test_div_by_two(int input) { + int divisor = 2; + benchmark::DoNotOptimize(divisor); + return input / divisor; + // CHECK: movl $2, [[DEST:.*]] + // CHECK: idivl [[DEST]] + // CHECK: ret +} + +// CHECK-LABEL: test_inc_integer: +extern "C" int test_inc_integer() { + int x = 0; + for (int i=0; i < 5; ++i) + benchmark::DoNotOptimize(++x); + // CHECK: movl $1, [[DEST:.*]] + // CHECK: {{(addl \$1,|incl)}} [[DEST]] + // CHECK: {{(addl \$1,|incl)}} [[DEST]] + // CHECK: {{(addl \$1,|incl)}} [[DEST]] + // CHECK: {{(addl \$1,|incl)}} [[DEST]] + // CHECK-CLANG: movl [[DEST]], %eax + // CHECK: ret + return x; +} + +// CHECK-LABEL: test_pointer_rvalue +extern "C" void test_pointer_rvalue() { + // CHECK: movl $42, [[DEST:.*]] + // CHECK: leaq [[DEST]], %rax + // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]]) + // CHECK: ret + int x = 42; + benchmark::DoNotOptimize(&x); +} + +// CHECK-LABEL: test_pointer_const_lvalue: +extern "C" void test_pointer_const_lvalue() { + // CHECK: movl $42, [[DEST:.*]] + // CHECK: leaq [[DEST]], %rax + // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]]) + // CHECK: ret + int x = 42; + int * const xp = &x; + benchmark::DoNotOptimize(xp); +} + +// CHECK-LABEL: test_pointer_lvalue: +extern "C" void test_pointer_lvalue() { + // CHECK: movl $42, [[DEST:.*]] + // CHECK: leaq [[DEST]], %rax + // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z+]+]]) + // CHECK: ret + int x = 42; + int *xp = &x; + benchmark::DoNotOptimize(xp); +} diff --git a/benchmarks/thirdparty/benchmark/test/donotoptimize_test.cc b/benchmarks/thirdparty/benchmark/test/donotoptimize_test.cc new file mode 100755 index 0000000000..2ce92d1c72 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/test/donotoptimize_test.cc @@ -0,0 +1,52 @@ +#include "benchmark/benchmark.h" + +#include + +namespace { +#if defined(__GNUC__) +std::uint64_t double_up(const std::uint64_t x) __attribute__((const)); +#endif +std::uint64_t double_up(const std::uint64_t x) { return x * 2; } +} + +// Using DoNotOptimize on types like BitRef seem to cause a lot of problems +// with the inline assembly on both GCC and Clang. 
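+// BitRef is small but not trivially copyable (it carries a reference
+// member), so it cannot simply be handed to an "r" register operand the way
+// an int can. A rough sketch of the kind of constraint involved, purely
+// illustrative and not the library's actual implementation:
+//
+//   int i = 0;
+//   asm volatile("" : "+r"(i));   // easy: the value fits in a register
+//   BitRef b = BitRef::Make();
+//   asm volatile("" : "+m"(b));   // the reference member forces a memory
+//                                 // operand, which trips some compilers
+//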
+struct BitRef {
+  int index;
+  unsigned char &byte;
+
+public:
+  static BitRef Make() {
+    static unsigned char arr[2] = {};
+    BitRef b(1, arr[0]);
+    return b;
+  }
+private:
+  BitRef(int i, unsigned char& b) : index(i), byte(b) {}
+};
+
+int main(int, char*[]) {
+  // this test verifies compilation of DoNotOptimize() for some types
+
+  char buffer8[8] = "";
+  benchmark::DoNotOptimize(buffer8);
+
+  char buffer20[20] = "";
+  benchmark::DoNotOptimize(buffer20);
+
+  char buffer1024[1024] = "";
+  benchmark::DoNotOptimize(buffer1024);
+  benchmark::DoNotOptimize(&buffer1024[0]);
+
+  int x = 123;
+  benchmark::DoNotOptimize(x);
+  benchmark::DoNotOptimize(&x);
+  benchmark::DoNotOptimize(x += 42);
+
+  benchmark::DoNotOptimize(double_up(x));
+
+  // These tests are to e
+  benchmark::DoNotOptimize(BitRef::Make());
+  BitRef lval = BitRef::Make();
+  benchmark::DoNotOptimize(lval);
+}
diff --git a/benchmarks/thirdparty/benchmark/test/filter_test.cc b/benchmarks/thirdparty/benchmark/test/filter_test.cc
new file mode 100755
index 0000000000..0e27065c15
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/filter_test.cc
@@ -0,0 +1,104 @@
+#include "benchmark/benchmark.h"
+
+#include <cassert>
+#include <cmath>
+#include <cstdint>
+#include <cstdlib>
+
+#include <iostream>
+#include <limits>
+#include <sstream>
+#include <string>
+
+namespace {
+
+class TestReporter : public benchmark::ConsoleReporter {
+ public:
+  virtual bool ReportContext(const Context& context) {
+    return ConsoleReporter::ReportContext(context);
+  };
+
+  virtual void ReportRuns(const std::vector<Run>& report) {
+    ++count_;
+    ConsoleReporter::ReportRuns(report);
+  };
+
+  TestReporter() : count_(0) {}
+
+  virtual ~TestReporter() {}
+
+  size_t GetCount() const { return count_; }
+
+ private:
+  mutable size_t count_;
+};
+
+}  // end namespace
+
+static void NoPrefix(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+}
+BENCHMARK(NoPrefix);
+
+static void BM_Foo(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+}
+BENCHMARK(BM_Foo);
+
+static void BM_Bar(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+}
+BENCHMARK(BM_Bar);
+
+static void BM_FooBar(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+}
+BENCHMARK(BM_FooBar);
+
+static void BM_FooBa(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+}
+BENCHMARK(BM_FooBa);
+
+int main(int argc, char **argv) {
+  bool list_only = false;
+  for (int i = 0; i < argc; ++i)
+    list_only |= std::string(argv[i]).find("--benchmark_list_tests") !=
+                 std::string::npos;
+
+  benchmark::Initialize(&argc, argv);
+
+  TestReporter test_reporter;
+  const size_t returned_count =
+      benchmark::RunSpecifiedBenchmarks(&test_reporter);
+
+  if (argc == 2) {
+    // Make sure we ran all of the tests
+    std::stringstream ss(argv[1]);
+    size_t expected_return;
+    ss >> expected_return;
+
+    if (returned_count != expected_return) {
+      std::cerr << "ERROR: Expected " << expected_return
+                << " tests to match the filter but returned_count = "
+                << returned_count << std::endl;
+      return -1;
+    }
+
+    const size_t expected_reports = list_only ? 0 : expected_return;
+    const size_t reports_count = test_reporter.GetCount();
+    if (reports_count != expected_reports) {
+      std::cerr << "ERROR: Expected " << expected_reports
+                << " tests to be run but reported_count = " << reports_count
+                << std::endl;
+      return -1;
+    }
+  }
+
+  return 0;
+}
diff --git a/benchmarks/thirdparty/benchmark/test/fixture_test.cc b/benchmarks/thirdparty/benchmark/test/fixture_test.cc
new file mode 100755
index 0000000000..1462b10f02
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/fixture_test.cc
@@ -0,0 +1,49 @@
+
+#include "benchmark/benchmark.h"
+
+#include <cassert>
+#include <memory>
+
+class MyFixture : public ::benchmark::Fixture {
+ public:
+  void SetUp(const ::benchmark::State& state) {
+    if (state.thread_index == 0) {
+      assert(data.get() == nullptr);
+      data.reset(new int(42));
+    }
+  }
+
+  void TearDown(const ::benchmark::State& state) {
+    if (state.thread_index == 0) {
+      assert(data.get() != nullptr);
+      data.reset();
+    }
+  }
+
+  ~MyFixture() { assert(data == nullptr); }
+
+  std::unique_ptr<int> data;
+};
+
+BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) {
+  assert(data.get() != nullptr);
+  assert(*data == 42);
+  for (auto _ : st) {
+  }
+}
+
+BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) {
+  if (st.thread_index == 0) {
+    assert(data.get() != nullptr);
+    assert(*data == 42);
+  }
+  for (auto _ : st) {
+    assert(data.get() != nullptr);
+    assert(*data == 42);
+  }
+  st.SetItemsProcessed(st.range(0));
+}
+BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42);
+BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42)->ThreadPerCpu();
+
+BENCHMARK_MAIN();
diff --git a/benchmarks/thirdparty/benchmark/test/internal_threading_test.cc b/benchmarks/thirdparty/benchmark/test/internal_threading_test.cc
new file mode 100755
index 0000000000..039d7c14a8
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/internal_threading_test.cc
@@ -0,0 +1,184 @@
+
+#undef NDEBUG
+
+#include <chrono>
+#include <thread>
+#include "../src/timers.h"
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+static const std::chrono::duration<double, std::milli> time_frame(50);
+static const double time_frame_in_sec(
+    std::chrono::duration_cast<
+        std::chrono::duration<double, std::chrono::seconds::period>>(
+        time_frame)
+        .count());
+
+void MyBusySpinwait() {
+  const auto start = benchmark::ChronoClockNow();
+
+  while (true) {
+    const auto now = benchmark::ChronoClockNow();
+    const auto elapsed = now - start;
+
+    if (std::chrono::duration<double, std::chrono::seconds::period>(elapsed) >=
+        time_frame)
+      return;
+  }
+}
+
+// ========================================================================= //
+// --------------------------- TEST CASES BEGIN ---------------------------- //
+// ========================================================================= //
+
+// ========================================================================= //
+// BM_MainThread
+
+void BM_MainThread(benchmark::State& state) {
+  for (auto _ : state) {
+    MyBusySpinwait();
+    state.SetIterationTime(time_frame_in_sec);
+  }
+  state.counters["invtime"] =
+      benchmark::Counter{1, benchmark::Counter::kIsRate};
+}
+
+BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1);
+BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseRealTime();
+BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseManualTime();
+BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime();
+BENCHMARK(BM_MainThread)
+    ->Iterations(1)
+    ->Threads(1)
+    ->MeasureProcessCPUTime()
+    ->UseRealTime();
+BENCHMARK(BM_MainThread)
+    ->Iterations(1)
+    ->Threads(1)
+    ->MeasureProcessCPUTime()
+    ->UseManualTime();
+
+BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2);
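+// The registrations above and below enumerate the same benchmark through the
+// cross product of timing modes: wall clock (UseRealTime), manually reported
+// time (UseManualTime), and per-process rather than per-thread CPU time
+// (MeasureProcessCPUTime), each with one and with two benchmark threads. The
+// "invtime" counter is rate-based (kIsRate), so it additionally checks that
+// counters are scaled by the measured time in every one of these modes.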
+BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseRealTime(); +BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseManualTime(); +BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime(); +BENCHMARK(BM_MainThread) + ->Iterations(1) + ->Threads(2) + ->MeasureProcessCPUTime() + ->UseRealTime(); +BENCHMARK(BM_MainThread) + ->Iterations(1) + ->Threads(2) + ->MeasureProcessCPUTime() + ->UseManualTime(); + +// ========================================================================= // +// BM_WorkerThread + +void BM_WorkerThread(benchmark::State& state) { + for (auto _ : state) { + std::thread Worker(&MyBusySpinwait); + Worker.join(); + state.SetIterationTime(time_frame_in_sec); + } + state.counters["invtime"] = + benchmark::Counter{1, benchmark::Counter::kIsRate}; +} + +BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1); +BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseRealTime(); +BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseManualTime(); +BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime(); +BENCHMARK(BM_WorkerThread) + ->Iterations(1) + ->Threads(1) + ->MeasureProcessCPUTime() + ->UseRealTime(); +BENCHMARK(BM_WorkerThread) + ->Iterations(1) + ->Threads(1) + ->MeasureProcessCPUTime() + ->UseManualTime(); + +BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2); +BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseRealTime(); +BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseManualTime(); +BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime(); +BENCHMARK(BM_WorkerThread) + ->Iterations(1) + ->Threads(2) + ->MeasureProcessCPUTime() + ->UseRealTime(); +BENCHMARK(BM_WorkerThread) + ->Iterations(1) + ->Threads(2) + ->MeasureProcessCPUTime() + ->UseManualTime(); + +// ========================================================================= // +// BM_MainThreadAndWorkerThread + +void BM_MainThreadAndWorkerThread(benchmark::State& state) { + for (auto _ : state) { + std::thread Worker(&MyBusySpinwait); + MyBusySpinwait(); + Worker.join(); + state.SetIterationTime(time_frame_in_sec); + } + state.counters["invtime"] = + benchmark::Counter{1, benchmark::Counter::kIsRate}; +} + +BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(1) + ->UseRealTime(); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(1) + ->UseManualTime(); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(1) + ->MeasureProcessCPUTime(); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(1) + ->MeasureProcessCPUTime() + ->UseRealTime(); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(1) + ->MeasureProcessCPUTime() + ->UseManualTime(); + +BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(2) + ->UseRealTime(); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(2) + ->UseManualTime(); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(2) + ->MeasureProcessCPUTime(); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(2) + ->MeasureProcessCPUTime() + ->UseRealTime(); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(2) + ->MeasureProcessCPUTime() + ->UseManualTime(); + +// ========================================================================= // +// ---------------------------- 
TEST CASES END ----------------------------- //
+// ========================================================================= //
+
+int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
diff --git a/benchmarks/thirdparty/benchmark/test/link_main_test.cc b/benchmarks/thirdparty/benchmark/test/link_main_test.cc
new file mode 100755
index 0000000000..241ad5c390
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/link_main_test.cc
@@ -0,0 +1,8 @@
+#include "benchmark/benchmark.h"
+
+void BM_empty(benchmark::State& state) {
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(state.iterations());
+  }
+}
+BENCHMARK(BM_empty);
diff --git a/benchmarks/thirdparty/benchmark/test/map_test.cc b/benchmarks/thirdparty/benchmark/test/map_test.cc
new file mode 100755
index 0000000000..dbf7982a36
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/map_test.cc
@@ -0,0 +1,57 @@
+#include "benchmark/benchmark.h"
+
+#include <cstdlib>
+#include <map>
+
+namespace {
+
+std::map<int, int> ConstructRandomMap(int size) {
+  std::map<int, int> m;
+  for (int i = 0; i < size; ++i) {
+    m.insert(std::make_pair(std::rand() % size, std::rand() % size));
+  }
+  return m;
+}
+
+}  // namespace
+
+// Basic version.
+static void BM_MapLookup(benchmark::State& state) {
+  const int size = static_cast<int>(state.range(0));
+  std::map<int, int> m;
+  for (auto _ : state) {
+    state.PauseTiming();
+    m = ConstructRandomMap(size);
+    state.ResumeTiming();
+    for (int i = 0; i < size; ++i) {
+      benchmark::DoNotOptimize(m.find(std::rand() % size));
+    }
+  }
+  state.SetItemsProcessed(state.iterations() * size);
+}
+BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12);
+
+// Using fixtures.
+class MapFixture : public ::benchmark::Fixture {
+ public:
+  void SetUp(const ::benchmark::State& st) {
+    m = ConstructRandomMap(static_cast<int>(st.range(0)));
+  }
+
+  void TearDown(const ::benchmark::State&) { m.clear(); }
+
+  std::map<int, int> m;
+};
+
+BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) {
+  const int size = static_cast<int>(state.range(0));
+  for (auto _ : state) {
+    for (int i = 0; i < size; ++i) {
+      benchmark::DoNotOptimize(m.find(std::rand() % size));
+    }
+  }
+  state.SetItemsProcessed(state.iterations() * size);
+}
+BENCHMARK_REGISTER_F(MapFixture, Lookup)->Range(1 << 3, 1 << 12);
+
+BENCHMARK_MAIN();
diff --git a/benchmarks/thirdparty/benchmark/test/memory_manager_test.cc b/benchmarks/thirdparty/benchmark/test/memory_manager_test.cc
new file mode 100755
index 0000000000..90bed16cff
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/memory_manager_test.cc
@@ -0,0 +1,44 @@
+#include <memory>
+
+#include "../src/check.h"
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+class TestMemoryManager : public benchmark::MemoryManager {
+  void Start() {}
+  void Stop(Result* result) {
+    result->num_allocs = 42;
+    result->max_bytes_used = 42000;
+  }
+};
+
+void BM_empty(benchmark::State& state) {
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(state.iterations());
+  }
+}
+BENCHMARK(BM_empty);
+
+ADD_CASES(TC_ConsoleOut, {{"^BM_empty %console_report$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_empty\",$"},
+                       {"\"run_name\": \"BM_empty\",$", MR_Next},
+                       {"\"run_type\": \"iteration\",$", MR_Next},
+                       {"\"repetitions\": 0,$", MR_Next},
+                       {"\"repetition_index\": 0,$", MR_Next},
+                       {"\"threads\": 1,$", MR_Next},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"allocs_per_iter\": %float,$", MR_Next},
+                       {"\"max_bytes_used\": 42000$", MR_Next},
+                       {"}", MR_Next}});
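+// The %int and %float tokens above are not literal output:
+// output_test_helper.cc rewrites them into regular expressions (e.g. %float
+// becomes "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?") before each line is matched,
+// and MR_Next pins a pattern to the line immediately after the previous match.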
+ADD_CASES(TC_CSVOut, {{"^\"BM_empty\",%csv_report$"}});
+
+int main(int argc, char* argv[]) {
+  std::unique_ptr<benchmark::MemoryManager> mm(new TestMemoryManager());
+
+  benchmark::RegisterMemoryManager(mm.get());
+  RunOutputTests(argc, argv);
+  benchmark::RegisterMemoryManager(nullptr);
+}
diff --git a/benchmarks/thirdparty/benchmark/test/multiple_ranges_test.cc b/benchmarks/thirdparty/benchmark/test/multiple_ranges_test.cc
new file mode 100755
index 0000000000..b25f40eb52
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/multiple_ranges_test.cc
@@ -0,0 +1,96 @@
+#include "benchmark/benchmark.h"
+
+#include <cassert>
+#include <iostream>
+#include <set>
+#include <vector>
+
+class MultipleRangesFixture : public ::benchmark::Fixture {
+ public:
+  MultipleRangesFixture()
+      : expectedValues({{1, 3, 5},
+                        {1, 3, 8},
+                        {1, 3, 15},
+                        {2, 3, 5},
+                        {2, 3, 8},
+                        {2, 3, 15},
+                        {1, 4, 5},
+                        {1, 4, 8},
+                        {1, 4, 15},
+                        {2, 4, 5},
+                        {2, 4, 8},
+                        {2, 4, 15},
+                        {1, 7, 5},
+                        {1, 7, 8},
+                        {1, 7, 15},
+                        {2, 7, 5},
+                        {2, 7, 8},
+                        {2, 7, 15},
+                        {7, 6, 3}}) {}
+
+  void SetUp(const ::benchmark::State& state) {
+    std::vector<int64_t> ranges = {state.range(0), state.range(1),
+                                   state.range(2)};
+
+    assert(expectedValues.find(ranges) != expectedValues.end());
+
+    actualValues.insert(ranges);
+  }
+
+  // NOTE: This is not TearDown as we want to check after _all_ runs are
+  // complete.
+  virtual ~MultipleRangesFixture() {
+    if (actualValues != expectedValues) {
+      std::cout << "EXPECTED\n";
+      for (auto v : expectedValues) {
+        std::cout << "{";
+        for (int64_t iv : v) {
+          std::cout << iv << ", ";
+        }
+        std::cout << "}\n";
+      }
+      std::cout << "ACTUAL\n";
+      for (auto v : actualValues) {
+        std::cout << "{";
+        for (int64_t iv : v) {
+          std::cout << iv << ", ";
+        }
+        std::cout << "}\n";
+      }
+    }
+  }
+
+  std::set<std::vector<int64_t>> expectedValues;
+  std::set<std::vector<int64_t>> actualValues;
+};
+
+BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) {
+  for (auto _ : state) {
+    int64_t product = state.range(0) * state.range(1) * state.range(2);
+    for (int64_t x = 0; x < product; x++) {
+      benchmark::DoNotOptimize(x);
+    }
+  }
+}
+
+BENCHMARK_REGISTER_F(MultipleRangesFixture, Empty)
+    ->RangeMultiplier(2)
+    ->Ranges({{1, 2}, {3, 7}, {5, 15}})
+    ->Args({7, 6, 3});
+
+void BM_CheckDefaultArgument(benchmark::State& state) {
+  // Test that the 'range()' without an argument is the same as 'range(0)'.
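+  // The two Ranges() intervals below, {1, 5} and {6, 10}, are disjoint, so
+  // range(0) can never equal range(1) and the inequality assert is reliable.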
+  assert(state.range() == state.range(0));
+  assert(state.range() != state.range(1));
+  for (auto _ : state) {
+  }
+}
+BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}});
+
+static void BM_MultipleRanges(benchmark::State& st) {
+  for (auto _ : st) {
+  }
+}
+BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}});
+
+BENCHMARK_MAIN();
diff --git a/benchmarks/thirdparty/benchmark/test/options_test.cc b/benchmarks/thirdparty/benchmark/test/options_test.cc
new file mode 100755
index 0000000000..7bfc235465
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/options_test.cc
@@ -0,0 +1,75 @@
+#include "benchmark/benchmark.h"
+#include <chrono>
+#include <thread>
+
+#if defined(NDEBUG)
+#undef NDEBUG
+#endif
+#include <cassert>
+
+void BM_basic(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+}
+
+void BM_basic_slow(benchmark::State& state) {
+  std::chrono::milliseconds sleep_duration(state.range(0));
+  for (auto _ : state) {
+    std::this_thread::sleep_for(
+        std::chrono::duration_cast<std::chrono::duration<int, std::milli>>(
+            sleep_duration));
+  }
+}
+
+BENCHMARK(BM_basic);
+BENCHMARK(BM_basic)->Arg(42);
+BENCHMARK(BM_basic_slow)->Arg(10)->Unit(benchmark::kNanosecond);
+BENCHMARK(BM_basic_slow)->Arg(100)->Unit(benchmark::kMicrosecond);
+BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kMillisecond);
+BENCHMARK(BM_basic)->Range(1, 8);
+BENCHMARK(BM_basic)->RangeMultiplier(2)->Range(1, 8);
+BENCHMARK(BM_basic)->DenseRange(10, 15);
+BENCHMARK(BM_basic)->Args({42, 42});
+BENCHMARK(BM_basic)->Ranges({{64, 512}, {64, 512}});
+BENCHMARK(BM_basic)->MinTime(0.7);
+BENCHMARK(BM_basic)->UseRealTime();
+BENCHMARK(BM_basic)->ThreadRange(2, 4);
+BENCHMARK(BM_basic)->ThreadPerCpu();
+BENCHMARK(BM_basic)->Repetitions(3);
+BENCHMARK(BM_basic)
+    ->RangeMultiplier(std::numeric_limits<int>::max())
+    ->Range(std::numeric_limits<int64_t>::min(),
+            std::numeric_limits<int64_t>::max());
+
+// Negative ranges
+BENCHMARK(BM_basic)->Range(-64, -1);
+BENCHMARK(BM_basic)->RangeMultiplier(4)->Range(-8, 8);
+BENCHMARK(BM_basic)->DenseRange(-2, 2, 1);
+BENCHMARK(BM_basic)->Ranges({{-64, 1}, {-8, -1}});
+
+void CustomArgs(benchmark::internal::Benchmark* b) {
+  for (int i = 0; i < 10; ++i) {
+    b->Arg(i);
+  }
+}
+
+BENCHMARK(BM_basic)->Apply(CustomArgs);
+
+void BM_explicit_iteration_count(benchmark::State& state) {
+  // Test that benchmarks specified with an explicit iteration count are
+  // only run once.
+  static bool invoked_before = false;
+  assert(!invoked_before);
+  invoked_before = true;
+
+  // Test that the requested iteration count is respected.
+  assert(state.max_iterations == 42);
+  size_t actual_iterations = 0;
+  for (auto _ : state)
+    ++actual_iterations;
+  assert(state.iterations() == state.max_iterations);
+  assert(state.iterations() == 42);
+
+}
+BENCHMARK(BM_explicit_iteration_count)->Iterations(42);
+
+BENCHMARK_MAIN();
diff --git a/benchmarks/thirdparty/benchmark/test/output_test.h b/benchmarks/thirdparty/benchmark/test/output_test.h
new file mode 100755
index 0000000000..9385761b21
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/output_test.h
@@ -0,0 +1,213 @@
+#ifndef TEST_OUTPUT_TEST_H
+#define TEST_OUTPUT_TEST_H
+
+#undef NDEBUG
+#include <functional>
+#include <initializer_list>
+#include <memory>
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "../src/re.h"
+#include "benchmark/benchmark.h"
+
+#define CONCAT2(x, y) x##y
+#define CONCAT(x, y) CONCAT2(x, y)
+
+#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = ::AddCases(__VA_ARGS__)
+
+#define SET_SUBSTITUTIONS(...) \
+  int CONCAT(dummy, __LINE__) = ::SetSubstitutions(__VA_ARGS__)
+
+enum MatchRules {
+  MR_Default,  // Skip non-matching lines until a match is found.
+  MR_Next,     // Match must occur on the next line.
+  MR_Not  // No line between the current position and the next match matches
+          // the regex
+};
+
+struct TestCase {
+  TestCase(std::string re, int rule = MR_Default);
+
+  std::string regex_str;
+  int match_rule;
+  std::string substituted_regex;
+  std::shared_ptr<benchmark::Regex> regex;
+};
+
+enum TestCaseID {
+  TC_ConsoleOut,
+  TC_ConsoleErr,
+  TC_JSONOut,
+  TC_JSONErr,
+  TC_CSVOut,
+  TC_CSVErr,
+
+  TC_NumID  // PRIVATE
+};
+
+// Add a list of test cases to be run against the output specified by
+// 'ID'
+int AddCases(TestCaseID ID, std::initializer_list<TestCase> il);
+
+// Add or set a list of substitutions to be performed on constructed regex's
+// See 'output_test_helper.cc' for a list of default substitutions.
+int SetSubstitutions(
+    std::initializer_list<std::pair<std::string, std::string>> il);
+
+// Run all output tests.
+void RunOutputTests(int argc, char* argv[]);
+
+// Count the number of 'pat' substrings in the 'haystack' string.
+int SubstrCnt(const std::string& haystack, const std::string& pat);
+
+// Run registered benchmarks with file reporter enabled, and return the content
+// outputted by the file reporter.
+std::string GetFileReporterOutput(int argc, char* argv[]);
+
+// ========================================================================= //
+// ------------------------- Results checking ------------------------------ //
+// ========================================================================= //
+
+// Call this macro to register a benchmark for checking its results. This
+// should be all that's needed. It subscribes a function to check the (CSV)
+// results of a benchmark. This is done only after verifying that the output
+// strings are really as expected.
+// bm_name_pattern: a name or a regex pattern which will be matched against
+//                  all the benchmark names. Matching benchmarks
+//                  will be the subject of a call to checker_function
+// checker_function: should be of type ResultsCheckFn (see below)
+#define CHECK_BENCHMARK_RESULTS(bm_name_pattern, checker_function) \
+  size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function)
+
+struct Results;
+typedef std::function<void(Results const&)> ResultsCheckFn;
+
+size_t AddChecker(const char* bm_name_pattern, ResultsCheckFn fn);
+
+// Class holding the results of a benchmark.
+// It is passed in calls to checker functions.
+struct Results {
+  // the benchmark name
+  std::string name;
+  // the benchmark fields
+  std::map<std::string, std::string> values;
+
+  Results(const std::string& n) : name(n) {}
+
+  int NumThreads() const;
+
+  double NumIterations() const;
+
+  typedef enum { kCpuTime, kRealTime } BenchmarkTime;
+
+  // get cpu_time or real_time in seconds
+  double GetTime(BenchmarkTime which) const;
+
+  // get the real_time duration of the benchmark in seconds.
+  // it is better to use fuzzy float checks for this, as the float
+  // ASCII formatting is lossy.
+  double DurationRealTime() const {
+    return NumIterations() * GetTime(kRealTime);
+  }
+  // get the cpu_time duration of the benchmark in seconds
+  double DurationCPUTime() const {
+    return NumIterations() * GetTime(kCpuTime);
+  }
+
+  // get the string for a result by name, or nullptr if the name
+  // is not found
+  const std::string* Get(const char* entry_name) const {
+    auto it = values.find(entry_name);
+    if (it == values.end()) return nullptr;
+    return &it->second;
+  }
+
+  // get a result by name, parsed as a specific type.
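+  // For example, a checker registered with CHECK_BENCHMARK_RESULTS might
+  // call (illustrative sketch only):
+  //   [](Results const& r) { CHECK_GT(r.GetAs<double>("real_time"), 0.0); }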
+  // NOTE: for counters, use GetCounterAs instead.
+  template <class T>
+  T GetAs(const char* entry_name) const;
+
+  // counters are written as doubles, so they have to be read first
+  // as a double, and only then converted to the asked type.
+  template <class T>
+  T GetCounterAs(const char* entry_name) const {
+    double dval = GetAs<double>(entry_name);
+    T tval = static_cast<T>(dval);
+    return tval;
+  }
+};
+
+template <class T>
+T Results::GetAs(const char* entry_name) const {
+  auto* sv = Get(entry_name);
+  CHECK(sv != nullptr && !sv->empty());
+  std::stringstream ss;
+  ss << *sv;
+  T out;
+  ss >> out;
+  CHECK(!ss.fail());
+  return out;
+}
+
+//----------------------------------
+// Macros to help in result checking. Do not use them with arguments causing
+// side-effects.
+
+// clang-format off
+
+#define _CHECK_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value) \
+    CONCAT(CHECK_, relationship)                                        \
+    (entry.getfn< var_type >(var_name), (value)) << "\n"                \
+    << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n"     \
+    << __FILE__ << ":" << __LINE__ << ": "                              \
+    << "expected (" << #var_type << ")" << (var_name)                   \
+    << "=" << (entry).getfn< var_type >(var_name)                       \
+    << " to be " #relationship " to " << (value) << "\n"
+
+// check with tolerance. eps_factor is the tolerance window, which is
+// interpreted relative to value (eg, 0.1 means 10% of value).
+#define _CHECK_FLOAT_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value, eps_factor) \
+    CONCAT(CHECK_FLOAT_, relationship)                                  \
+    (entry.getfn< var_type >(var_name), (value), (eps_factor) * (value)) << "\n" \
+    << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n"     \
+    << __FILE__ << ":" << __LINE__ << ": "                              \
+    << "expected (" << #var_type << ")" << (var_name)                   \
+    << "=" << (entry).getfn< var_type >(var_name)                       \
+    << " to be " #relationship " to " << (value) << "\n"                \
+    << __FILE__ << ":" << __LINE__ << ": "                              \
+    << "with tolerance of " << (eps_factor) * (value)                   \
+    << " (" << (eps_factor)*100. << "%), "                              \
+    << "but delta was " << ((entry).getfn< var_type >(var_name) - (value)) \
+    << " (" << (((entry).getfn< var_type >(var_name) - (value))         \
+               /                                                        \
+               ((value) > 1.e-5 || value < -1.e-5 ? value : 1.e-5)*100.) \
+    << "%)"
+
+#define CHECK_RESULT_VALUE(entry, var_type, var_name, relationship, value) \
+    _CHECK_RESULT_VALUE(entry, GetAs, var_type, var_name, relationship, value)
+
+#define CHECK_COUNTER_VALUE(entry, var_type, var_name, relationship, value) \
+    _CHECK_RESULT_VALUE(entry, GetCounterAs, var_type, var_name, relationship, value)
+
+#define CHECK_FLOAT_RESULT_VALUE(entry, var_name, relationship, value, eps_factor) \
+    _CHECK_FLOAT_RESULT_VALUE(entry, GetAs, double, var_name, relationship, value, eps_factor)
+
+#define CHECK_FLOAT_COUNTER_VALUE(entry, var_name, relationship, value, eps_factor) \
+    _CHECK_FLOAT_RESULT_VALUE(entry, GetCounterAs, double, var_name, relationship, value, eps_factor)
+
+// clang-format on
+
+// ========================================================================= //
+// --------------------------- Misc Utilities ------------------------------ //
+// ========================================================================= //
+
+namespace {
+
+const char* const dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
+
+}  // end namespace
+
+#endif  // TEST_OUTPUT_TEST_H
diff --git a/benchmarks/thirdparty/benchmark/test/output_test_helper.cc b/benchmarks/thirdparty/benchmark/test/output_test_helper.cc
new file mode 100755
index 0000000000..f99b3a8261
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/output_test_helper.cc
@@ -0,0 +1,515 @@
+#include <cstdio>
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <map>
+#include <memory>
+#include <random>
+#include <sstream>
+#include <streambuf>
+
+#include "../src/benchmark_api_internal.h"
+#include "../src/check.h"  // NOTE: check.h is for internal use only!
+#include "../src/re.h"     // NOTE: re.h is for internal use only
+#include "output_test.h"
+
+// ========================================================================= //
+// ------------------------------ Internals -------------------------------- //
+// ========================================================================= //
+namespace internal {
+namespace {
+
+using TestCaseList = std::vector<TestCase>;
+
+// Use a vector because the order elements are added matters during iteration.
+// std::map/unordered_map don't guarantee that.
+// For example:
+//  SetSubstitutions({{"%HelloWorld", "Hello"}, {"%Hello", "Hi"}});
+//  Substitute("%HelloWorld")  // Always expands to Hello.
+using SubMap = std::vector<std::pair<std::string, std::string>>;
+
+TestCaseList& GetTestCaseList(TestCaseID ID) {
+  // Uses function-local statics to ensure initialization occurs
+  // before first use.
+  static TestCaseList lists[TC_NumID];
+  return lists[ID];
+}
+
+SubMap& GetSubstitutions() {
+  // Don't use 'dec_re' from header because it may not yet be initialized.
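+  // (Initialization order across translation units is unspecified, and this
+  // function can run during another file's global initialization via the
+  // ADD_CASES dummies, so a function-local copy of the pattern is the safe
+  // way to avoid the static initialization order problem.)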
+ // clang-format off + static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"; + static std::string time_re = "([0-9]+[.])?[0-9]+"; + static SubMap map = { + {"%float", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"}, + // human-readable float + {"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kMGTPEZYmunpfazy]?"}, + {"%int", "[ ]*[0-9]+"}, + {" %s ", "[ ]+"}, + {"%time", "[ ]*" + time_re + "[ ]+ns"}, + {"%console_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns [ ]*[0-9]+"}, + {"%console_time_only_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns"}, + {"%console_us_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us [ ]*[0-9]+"}, + {"%console_us_time_only_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us"}, + {"%csv_header", + "name,iterations,real_time,cpu_time,time_unit,bytes_per_second," + "items_per_second,label,error_occurred,error_message"}, + {"%csv_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,,,"}, + {"%csv_us_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",us,,,,,"}, + {"%csv_bytes_report", + "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + ",,,,"}, + {"%csv_items_report", + "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,," + safe_dec_re + ",,,"}, + {"%csv_bytes_items_report", + "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + + "," + safe_dec_re + ",,,"}, + {"%csv_label_report_begin", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,"}, + {"%csv_label_report_end", ",,"}}; + // clang-format on + return map; +} + +std::string PerformSubstitutions(std::string source) { + SubMap const& subs = GetSubstitutions(); + using SizeT = std::string::size_type; + for (auto const& KV : subs) { + SizeT pos; + SizeT next_start = 0; + while ((pos = source.find(KV.first, next_start)) != std::string::npos) { + next_start = pos + KV.second.size(); + source.replace(pos, KV.first.size(), KV.second); + } + } + return source; +} + +void CheckCase(std::stringstream& remaining_output, TestCase const& TC, + TestCaseList const& not_checks) { + std::string first_line; + bool on_first = true; + std::string line; + while (remaining_output.eof() == false) { + CHECK(remaining_output.good()); + std::getline(remaining_output, line); + if (on_first) { + first_line = line; + on_first = false; + } + for (const auto& NC : not_checks) { + CHECK(!NC.regex->Match(line)) + << "Unexpected match for line \"" << line << "\" for MR_Not regex \"" + << NC.regex_str << "\"" + << "\n actual regex string \"" << TC.substituted_regex << "\"" + << "\n started matching near: " << first_line; + } + if (TC.regex->Match(line)) return; + CHECK(TC.match_rule != MR_Next) + << "Expected line \"" << line << "\" to match regex \"" << TC.regex_str + << "\"" + << "\n actual regex string \"" << TC.substituted_regex << "\"" + << "\n started matching near: " << first_line; + } + CHECK(remaining_output.eof() == false) + << "End of output reached before match for regex \"" << TC.regex_str + << "\" was found" + << "\n actual regex string \"" << TC.substituted_regex << "\"" + << "\n started matching near: " << first_line; +} + +void CheckCases(TestCaseList const& checks, std::stringstream& output) { + std::vector not_checks; + for (size_t i = 0; i < checks.size(); ++i) { + const auto& TC = checks[i]; + if (TC.match_rule == MR_Not) { + not_checks.push_back(TC); + continue; + } + CheckCase(output, TC, not_checks); + not_checks.clear(); + } +} + +class TestReporter : public benchmark::BenchmarkReporter { + public: + 
TestReporter(std::vector reps) + : reporters_(reps) {} + + virtual bool ReportContext(const Context& context) { + bool last_ret = false; + bool first = true; + for (auto rep : reporters_) { + bool new_ret = rep->ReportContext(context); + CHECK(first || new_ret == last_ret) + << "Reports return different values for ReportContext"; + first = false; + last_ret = new_ret; + } + (void)first; + return last_ret; + } + + void ReportRuns(const std::vector& report) { + for (auto rep : reporters_) rep->ReportRuns(report); + } + void Finalize() { + for (auto rep : reporters_) rep->Finalize(); + } + + private: + std::vector reporters_; +}; +} // namespace + +} // end namespace internal + +// ========================================================================= // +// -------------------------- Results checking ----------------------------- // +// ========================================================================= // + +namespace internal { + +// Utility class to manage subscribers for checking benchmark results. +// It works by parsing the CSV output to read the results. +class ResultsChecker { + public: + struct PatternAndFn : public TestCase { // reusing TestCase for its regexes + PatternAndFn(const std::string& rx, ResultsCheckFn fn_) + : TestCase(rx), fn(fn_) {} + ResultsCheckFn fn; + }; + + std::vector check_patterns; + std::vector results; + std::vector field_names; + + void Add(const std::string& entry_pattern, ResultsCheckFn fn); + + void CheckResults(std::stringstream& output); + + private: + void SetHeader_(const std::string& csv_header); + void SetValues_(const std::string& entry_csv_line); + + std::vector SplitCsv_(const std::string& line); +}; + +// store the static ResultsChecker in a function to prevent initialization +// order problems +ResultsChecker& GetResultsChecker() { + static ResultsChecker rc; + return rc; +} + +// add a results checker for a benchmark +void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) { + check_patterns.emplace_back(entry_pattern, fn); +} + +// check the results of all subscribed benchmarks +void ResultsChecker::CheckResults(std::stringstream& output) { + // first reset the stream to the start + { + auto start = std::stringstream::pos_type(0); + // clear before calling tellg() + output.clear(); + // seek to zero only when needed + if (output.tellg() > start) output.seekg(start); + // and just in case + output.clear(); + } + // now go over every line and publish it to the ResultsChecker + std::string line; + bool on_first = true; + while (output.eof() == false) { + CHECK(output.good()); + std::getline(output, line); + if (on_first) { + SetHeader_(line); // this is important + on_first = false; + continue; + } + SetValues_(line); + } + // finally we can call the subscribed check functions + for (const auto& p : check_patterns) { + VLOG(2) << "--------------------------------\n"; + VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n"; + for (const auto& r : results) { + if (!p.regex->Match(r.name)) { + VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n"; + continue; + } else { + VLOG(2) << p.regex_str << " is matched by " << r.name << "\n"; + } + VLOG(1) << "Checking results of " << r.name << ": ... 
\n"; + p.fn(r); + VLOG(1) << "Checking results of " << r.name << ": OK.\n"; + } + } +} + +// prepare for the names in this header +void ResultsChecker::SetHeader_(const std::string& csv_header) { + field_names = SplitCsv_(csv_header); +} + +// set the values for a benchmark +void ResultsChecker::SetValues_(const std::string& entry_csv_line) { + if (entry_csv_line.empty()) return; // some lines are empty + CHECK(!field_names.empty()); + auto vals = SplitCsv_(entry_csv_line); + CHECK_EQ(vals.size(), field_names.size()); + results.emplace_back(vals[0]); // vals[0] is the benchmark name + auto& entry = results.back(); + for (size_t i = 1, e = vals.size(); i < e; ++i) { + entry.values[field_names[i]] = vals[i]; + } +} + +// a quick'n'dirty csv splitter (eliminating quotes) +std::vector ResultsChecker::SplitCsv_(const std::string& line) { + std::vector out; + if (line.empty()) return out; + if (!field_names.empty()) out.reserve(field_names.size()); + size_t prev = 0, pos = line.find_first_of(','), curr = pos; + while (pos != line.npos) { + CHECK(curr > 0); + if (line[prev] == '"') ++prev; + if (line[curr - 1] == '"') --curr; + out.push_back(line.substr(prev, curr - prev)); + prev = pos + 1; + pos = line.find_first_of(',', pos + 1); + curr = pos; + } + curr = line.size(); + if (line[prev] == '"') ++prev; + if (line[curr - 1] == '"') --curr; + out.push_back(line.substr(prev, curr - prev)); + return out; +} + +} // end namespace internal + +size_t AddChecker(const char* bm_name, ResultsCheckFn fn) { + auto& rc = internal::GetResultsChecker(); + rc.Add(bm_name, fn); + return rc.results.size(); +} + +int Results::NumThreads() const { + auto pos = name.find("/threads:"); + if (pos == name.npos) return 1; + auto end = name.find('/', pos + 9); + std::stringstream ss; + ss << name.substr(pos + 9, end); + int num = 1; + ss >> num; + CHECK(!ss.fail()); + return num; +} + +double Results::NumIterations() const { + return GetAs("iterations"); +} + +double Results::GetTime(BenchmarkTime which) const { + CHECK(which == kCpuTime || which == kRealTime); + const char* which_str = which == kCpuTime ? 
"cpu_time" : "real_time"; + double val = GetAs(which_str); + auto unit = Get("time_unit"); + CHECK(unit); + if (*unit == "ns") { + return val * 1.e-9; + } else if (*unit == "us") { + return val * 1.e-6; + } else if (*unit == "ms") { + return val * 1.e-3; + } else if (*unit == "s") { + return val; + } else { + CHECK(1 == 0) << "unknown time unit: " << *unit; + return 0; + } +} + +// ========================================================================= // +// -------------------------- Public API Definitions------------------------ // +// ========================================================================= // + +TestCase::TestCase(std::string re, int rule) + : regex_str(std::move(re)), + match_rule(rule), + substituted_regex(internal::PerformSubstitutions(regex_str)), + regex(std::make_shared()) { + std::string err_str; + regex->Init(substituted_regex, &err_str); + CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex + << "\"" + << "\n originally \"" << regex_str << "\"" + << "\n got error: " << err_str; +} + +int AddCases(TestCaseID ID, std::initializer_list il) { + auto& L = internal::GetTestCaseList(ID); + L.insert(L.end(), il); + return 0; +} + +int SetSubstitutions( + std::initializer_list> il) { + auto& subs = internal::GetSubstitutions(); + for (auto KV : il) { + bool exists = false; + KV.second = internal::PerformSubstitutions(KV.second); + for (auto& EKV : subs) { + if (EKV.first == KV.first) { + EKV.second = std::move(KV.second); + exists = true; + break; + } + } + if (!exists) subs.push_back(std::move(KV)); + } + return 0; +} + +// Disable deprecated warnings temporarily because we need to reference +// CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations +#ifdef __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#endif +void RunOutputTests(int argc, char* argv[]) { + using internal::GetTestCaseList; + benchmark::Initialize(&argc, argv); + auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/ true); + benchmark::ConsoleReporter CR(options); + benchmark::JSONReporter JR; + benchmark::CSVReporter CSVR; + struct ReporterTest { + const char* name; + std::vector& output_cases; + std::vector& error_cases; + benchmark::BenchmarkReporter& reporter; + std::stringstream out_stream; + std::stringstream err_stream; + + ReporterTest(const char* n, std::vector& out_tc, + std::vector& err_tc, + benchmark::BenchmarkReporter& br) + : name(n), output_cases(out_tc), error_cases(err_tc), reporter(br) { + reporter.SetOutputStream(&out_stream); + reporter.SetErrorStream(&err_stream); + } + } TestCases[] = { + {"ConsoleReporter", GetTestCaseList(TC_ConsoleOut), + GetTestCaseList(TC_ConsoleErr), CR}, + {"JSONReporter", GetTestCaseList(TC_JSONOut), GetTestCaseList(TC_JSONErr), + JR}, + {"CSVReporter", GetTestCaseList(TC_CSVOut), GetTestCaseList(TC_CSVErr), + CSVR}, + }; + + // Create the test reporter and run the benchmarks. 
+ std::cout << "Running benchmarks...\n"; + internal::TestReporter test_rep({&CR, &JR, &CSVR}); + benchmark::RunSpecifiedBenchmarks(&test_rep); + + for (auto& rep_test : TestCases) { + std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n"; + std::string banner(msg.size() - 1, '-'); + std::cout << banner << msg << banner << "\n"; + + std::cerr << rep_test.err_stream.str(); + std::cout << rep_test.out_stream.str(); + + internal::CheckCases(rep_test.error_cases, rep_test.err_stream); + internal::CheckCases(rep_test.output_cases, rep_test.out_stream); + + std::cout << "\n"; + } + + // now that we know the output is as expected, we can dispatch + // the checks to subscribees. + auto& csv = TestCases[2]; + // would use == but gcc spits a warning + CHECK(std::strcmp(csv.name, "CSVReporter") == 0); + internal::GetResultsChecker().CheckResults(csv.out_stream); +} + +#ifdef __GNUC__ +#pragma GCC diagnostic pop +#endif + +int SubstrCnt(const std::string& haystack, const std::string& pat) { + if (pat.length() == 0) return 0; + int count = 0; + for (size_t offset = haystack.find(pat); offset != std::string::npos; + offset = haystack.find(pat, offset + pat.length())) + ++count; + return count; +} + +static char ToHex(int ch) { + return ch < 10 ? static_cast('0' + ch) + : static_cast('a' + (ch - 10)); +} + +static char RandomHexChar() { + static std::mt19937 rd{std::random_device{}()}; + static std::uniform_int_distribution mrand{0, 15}; + return ToHex(mrand(rd)); +} + +static std::string GetRandomFileName() { + std::string model = "test.%%%%%%"; + for (auto & ch : model) { + if (ch == '%') + ch = RandomHexChar(); + } + return model; +} + +static bool FileExists(std::string const& name) { + std::ifstream in(name.c_str()); + return in.good(); +} + +static std::string GetTempFileName() { + // This function attempts to avoid race conditions where two tests + // create the same file at the same time. However, it still introduces races + // similar to tmpnam. + int retries = 3; + while (--retries) { + std::string name = GetRandomFileName(); + if (!FileExists(name)) + return name; + } + std::cerr << "Failed to create unique temporary file name" << std::endl; + std::abort(); +} + +std::string GetFileReporterOutput(int argc, char* argv[]) { + std::vector new_argv(argv, argv + argc); + assert(static_cast(argc) == new_argv.size()); + + std::string tmp_file_name = GetTempFileName(); + std::cout << "Will be using this as the tmp file: " << tmp_file_name << '\n'; + + std::string tmp = "--benchmark_out="; + tmp += tmp_file_name; + new_argv.emplace_back(const_cast(tmp.c_str())); + + argc = int(new_argv.size()); + + benchmark::Initialize(&argc, new_argv.data()); + benchmark::RunSpecifiedBenchmarks(); + + // Read the output back from the file, and delete the file. + std::ifstream tmp_stream(tmp_file_name); + std::string output = std::string((std::istreambuf_iterator(tmp_stream)), + std::istreambuf_iterator()); + std::remove(tmp_file_name.c_str()); + + return output; +} diff --git a/benchmarks/thirdparty/benchmark/test/register_benchmark_test.cc b/benchmarks/thirdparty/benchmark/test/register_benchmark_test.cc new file mode 100755 index 0000000000..3ac5b21fb3 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/test/register_benchmark_test.cc @@ -0,0 +1,184 @@ + +#undef NDEBUG +#include +#include + +#include "../src/check.h" // NOTE: check.h is for internal use only! 
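+// check.h supplies the CHECK()-style assertion macros used by
+// TestCase::CheckRun() below; the test reaches into src/ deliberately, since
+// these helpers are not part of the public benchmark headers.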
+#include "benchmark/benchmark.h" + +namespace { + +class TestReporter : public benchmark::ConsoleReporter { + public: + virtual void ReportRuns(const std::vector& report) { + all_runs_.insert(all_runs_.end(), begin(report), end(report)); + ConsoleReporter::ReportRuns(report); + } + + std::vector all_runs_; +}; + +struct TestCase { + std::string name; + const char* label; + // Note: not explicit as we rely on it being converted through ADD_CASES. + TestCase(const char* xname) : TestCase(xname, nullptr) {} + TestCase(const char* xname, const char* xlabel) + : name(xname), label(xlabel) {} + + typedef benchmark::BenchmarkReporter::Run Run; + + void CheckRun(Run const& run) const { + // clang-format off + CHECK(name == run.benchmark_name()) << "expected " << name << " got " + << run.benchmark_name(); + if (label) { + CHECK(run.report_label == label) << "expected " << label << " got " + << run.report_label; + } else { + CHECK(run.report_label == ""); + } + // clang-format on + } +}; + +std::vector ExpectedResults; + +int AddCases(std::initializer_list const& v) { + for (auto N : v) { + ExpectedResults.push_back(N); + } + return 0; +} + +#define CONCAT(x, y) CONCAT2(x, y) +#define CONCAT2(x, y) x##y +#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = AddCases({__VA_ARGS__}) + +} // end namespace + +typedef benchmark::internal::Benchmark* ReturnVal; + +//----------------------------------------------------------------------------// +// Test RegisterBenchmark with no additional arguments +//----------------------------------------------------------------------------// +void BM_function(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_function); +ReturnVal dummy = benchmark::RegisterBenchmark( + "BM_function_manual_registration", BM_function); +ADD_CASES({"BM_function"}, {"BM_function_manual_registration"}); + +//----------------------------------------------------------------------------// +// Test RegisterBenchmark with additional arguments +// Note: GCC <= 4.8 do not support this form of RegisterBenchmark because they +// reject the variadic pack expansion of lambda captures. 
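+// On newer compilers the lambda-capture based registration exercised by
+// RegisterFromFunction() below could equally be written directly, e.g.
+// (sketch): benchmark::RegisterBenchmark("test1", &BM_extra_args, "One");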
+
+//----------------------------------------------------------------------------//
+// Test RegisterBenchmark with additional arguments
+// Note: GCC <= 4.8 does not support this form of RegisterBenchmark because it
+// rejects the variadic pack expansion of lambda captures.
+//----------------------------------------------------------------------------//
+#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
+
+void BM_extra_args(benchmark::State& st, const char* label) {
+  for (auto _ : st) {
+  }
+  st.SetLabel(label);
+}
+int RegisterFromFunction() {
+  std::pair<const char*, const char*> cases[] = {
+      {"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}};
+  for (auto const& c : cases)
+    benchmark::RegisterBenchmark(c.first, &BM_extra_args, c.second);
+  return 0;
+}
+int dummy2 = RegisterFromFunction();
+ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"});
+
+#endif  // BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
+
+//----------------------------------------------------------------------------//
+// Test RegisterBenchmark with different callable types
+//----------------------------------------------------------------------------//
+
+struct CustomFixture {
+  void operator()(benchmark::State& st) {
+    for (auto _ : st) {
+    }
+  }
+};
+
+void TestRegistrationAtRuntime() {
+#ifdef BENCHMARK_HAS_CXX11
+  {
+    CustomFixture fx;
+    benchmark::RegisterBenchmark("custom_fixture", fx);
+    AddCases({"custom_fixture"});
+  }
+#endif
+#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
+  {
+    const char* x = "42";
+    auto capturing_lam = [=](benchmark::State& st) {
+      for (auto _ : st) {
+      }
+      st.SetLabel(x);
+    };
+    benchmark::RegisterBenchmark("lambda_benchmark", capturing_lam);
+    AddCases({{"lambda_benchmark", x}});
+  }
+#endif
+}
+
+// Test that all benchmarks, registered either during static init or at
+// runtime, are run and that their results are passed to the reporter.
+void RunTestOne() {
+  TestRegistrationAtRuntime();
+
+  TestReporter test_reporter;
+  benchmark::RunSpecifiedBenchmarks(&test_reporter);
+
+  typedef benchmark::BenchmarkReporter::Run Run;
+  auto EB = ExpectedResults.begin();
+
+  for (Run const& run : test_reporter.all_runs_) {
+    assert(EB != ExpectedResults.end());
+    EB->CheckRun(run);
+    ++EB;
+  }
+  assert(EB == ExpectedResults.end());
+}
+
+// Test that ClearRegisteredBenchmarks() clears all previously registered
+// benchmarks.
+// Also test that new benchmarks can be registered and run afterwards.
+void RunTestTwo() {
+  assert(ExpectedResults.size() != 0 &&
+         "must have at least one registered benchmark");
+  ExpectedResults.clear();
+  benchmark::ClearRegisteredBenchmarks();
+
+  TestReporter test_reporter;
+  size_t num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
+  assert(num_ran == 0);
+  assert(test_reporter.all_runs_.begin() == test_reporter.all_runs_.end());
+
+  TestRegistrationAtRuntime();
+  num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
+  assert(num_ran == ExpectedResults.size());
+
+  typedef benchmark::BenchmarkReporter::Run Run;
+  auto EB = ExpectedResults.begin();
+
+  for (Run const& run : test_reporter.all_runs_) {
+    assert(EB != ExpectedResults.end());
+    EB->CheckRun(run);
+    ++EB;
+  }
+  assert(EB == ExpectedResults.end());
+}
+
+int main(int argc, char* argv[]) {
+  benchmark::Initialize(&argc, argv);
+
+  RunTestOne();
+  RunTestTwo();
+}
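RunTestTwo above leans on two properties of the library worth calling out: ClearRegisteredBenchmarks() empties the registry, and RunSpecifiedBenchmarks() returns how many benchmarks it actually ran. A condensed, standalone sketch of that contract, using only the public API shown above:

#include <cassert>

#include "benchmark/benchmark.h"

static void BM_noop(benchmark::State& state) {
  for (auto _ : state) {
  }
}

int main(int argc, char** argv) {
  benchmark::Initialize(&argc, argv);
  benchmark::RegisterBenchmark("BM_noop", BM_noop);
  benchmark::ClearRegisteredBenchmarks();            // registry is empty again
  assert(benchmark::RunSpecifiedBenchmarks() == 0);  // nothing left to run
  return 0;
}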
diff --git a/benchmarks/thirdparty/benchmark/test/report_aggregates_only_test.cc b/benchmarks/thirdparty/benchmark/test/report_aggregates_only_test.cc
new file mode 100755
index 0000000000..9646b9be53
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/report_aggregates_only_test.cc
@@ -0,0 +1,39 @@
+
+#undef NDEBUG
+#include <iostream>
+#include <string>
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// OK, this test is super ugly. We want to check what happens with the file
+// reporter in the presence of ReportAggregatesOnly().
+// We do not care about console output; the normal tests check that already.
+
+void BM_SummaryRepeat(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+}
+BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
+
+int main(int argc, char* argv[]) {
+  const std::string output = GetFileReporterOutput(argc, argv);
+
+  if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 3 ||
+      SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
+      SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") !=
+          1 ||
+      SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") !=
+          1) {
+    std::cout << "Precondition mismatch. Expected to find only three "
+                 "occurrences of the \"BM_SummaryRepeat/repeats:3\" substring:\n"
+                 "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
+                 "\"name\": \"BM_SummaryRepeat/repeats:3_median\", "
+                 "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire "
+                 "output:\n";
+    std::cout << output;
+    return 1;
+  }
+
+  return 0;
+}
diff --git a/benchmarks/thirdparty/benchmark/test/reporter_output_test.cc b/benchmarks/thirdparty/benchmark/test/reporter_output_test.cc
new file mode 100755
index 0000000000..bcce007831
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/reporter_output_test.cc
@@ -0,0 +1,747 @@
+
+#undef NDEBUG
+#include <utility>
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// ========================================================================= //
+// ---------------------- Testing Prologue Output -------------------------- //
+// ========================================================================= //
+
+ADD_CASES(TC_ConsoleOut, {{"^[-]+$", MR_Next},
+                          {"^Benchmark %s Time %s CPU %s Iterations$", MR_Next},
+                          {"^[-]+$", MR_Next}});
+static int AddContextCases() {
+  AddCases(TC_ConsoleErr,
+           {
+               {"^%int-%int-%intT%int:%int:%int[-+]%int:%int$", MR_Default},
+               {"Running .*/reporter_output_test(\\.exe)?$", MR_Next},
+               {"Run on \\(%int X %float MHz CPU s?\\)", MR_Next},
+           });
+  AddCases(TC_JSONOut,
+           {{"^\\{", MR_Default},
+            {"\"context\":", MR_Next},
+            {"\"date\": \"", MR_Next},
+            {"\"host_name\":", MR_Next},
+            {"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",",
+             MR_Next},
+            {"\"num_cpus\": %int,$", MR_Next},
+            {"\"mhz_per_cpu\": %float,$", MR_Next},
+            {"\"caches\": \\[$", MR_Default}});
+  auto const& Info = benchmark::CPUInfo::Get();
+  auto const& Caches = Info.caches;
+  if (!Caches.empty()) {
+    AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}});
+  }
+  for (size_t I = 0; I < Caches.size(); ++I) {
+    std::string num_caches_str =
+        Caches[I].num_sharing != 0 ?
" \\(x%int\\)$" : "$"; + AddCases(TC_ConsoleErr, + {{"L%int (Data|Instruction|Unified) %int KiB" + num_caches_str, + MR_Next}}); + AddCases(TC_JSONOut, {{"\\{$", MR_Next}, + {"\"type\": \"", MR_Next}, + {"\"level\": %int,$", MR_Next}, + {"\"size\": %int,$", MR_Next}, + {"\"num_sharing\": %int$", MR_Next}, + {"}[,]{0,1}$", MR_Next}}); + } + AddCases(TC_JSONOut, {{"],$"}}); + auto const& LoadAvg = Info.load_avg; + if (!LoadAvg.empty()) { + AddCases(TC_ConsoleErr, + {{"Load Average: (%float, ){0,2}%float$", MR_Next}}); + } + AddCases(TC_JSONOut, {{"\"load_avg\": \\[(%float,?){0,3}],$", MR_Next}}); + return 0; +} +int dummy_register = AddContextCases(); +ADD_CASES(TC_CSVOut, {{"%csv_header"}}); + +// ========================================================================= // +// ------------------------ Testing Basic Output --------------------------- // +// ========================================================================= // + +void BM_basic(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_basic); + +ADD_CASES(TC_ConsoleOut, {{"^BM_basic %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_basic\",$"}, + {"\"run_name\": \"BM_basic\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}}); + +// ========================================================================= // +// ------------------------ Testing Bytes per Second Output ---------------- // +// ========================================================================= // + +void BM_bytes_per_second(benchmark::State& state) { + for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); + } + state.SetBytesProcessed(1); +} +BENCHMARK(BM_bytes_per_second); + +ADD_CASES(TC_ConsoleOut, {{"^BM_bytes_per_second %console_report " + "bytes_per_second=%float[kM]{0,1}/s$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"}, + {"\"run_name\": \"BM_bytes_per_second\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bytes_per_second\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}}); + +// ========================================================================= // +// ------------------------ Testing Items per Second Output ---------------- // +// ========================================================================= // + +void BM_items_per_second(benchmark::State& state) { + for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); + } + state.SetItemsProcessed(1); +} +BENCHMARK(BM_items_per_second); + +ADD_CASES(TC_ConsoleOut, {{"^BM_items_per_second %console_report " + "items_per_second=%float[kM]{0,1}/s$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"}, + {"\"run_name\": \"BM_items_per_second\",$", MR_Next}, + 
{"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"items_per_second\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}}); + +// ========================================================================= // +// ------------------------ Testing Label Output --------------------------- // +// ========================================================================= // + +void BM_label(benchmark::State& state) { + for (auto _ : state) { + } + state.SetLabel("some label"); +} +BENCHMARK(BM_label); + +ADD_CASES(TC_ConsoleOut, {{"^BM_label %console_report some label$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"}, + {"\"run_name\": \"BM_label\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"label\": \"some label\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some " + "label\"%csv_label_report_end$"}}); + +// ========================================================================= // +// ------------------------ Testing Error Output --------------------------- // +// ========================================================================= // + +void BM_error(benchmark::State& state) { + state.SkipWithError("message"); + for (auto _ : state) { + } +} +BENCHMARK(BM_error); +ADD_CASES(TC_ConsoleOut, {{"^BM_error[ ]+ERROR OCCURRED: 'message'$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_error\",$"}, + {"\"run_name\": \"BM_error\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"error_occurred\": true,$", MR_Next}, + {"\"error_message\": \"message\",$", MR_Next}}); + +ADD_CASES(TC_CSVOut, {{"^\"BM_error\",,,,,,,,true,\"message\"$"}}); + +// ========================================================================= // +// ------------------------ Testing No Arg Name Output ----------------------- +// // +// ========================================================================= // + +void BM_no_arg_name(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_no_arg_name)->Arg(3); +ADD_CASES(TC_ConsoleOut, {{"^BM_no_arg_name/3 %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_no_arg_name/3\",$"}, + {"\"run_name\": \"BM_no_arg_name/3\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}}); + +// ========================================================================= // +// ------------------------ Testing Arg Name Output ----------------------- // +// ========================================================================= // + +void BM_arg_name(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3); +ADD_CASES(TC_ConsoleOut, 
{{"^BM_arg_name/first:3 %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_name/first:3\",$"}, + {"\"run_name\": \"BM_arg_name/first:3\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}}); + +// ========================================================================= // +// ------------------------ Testing Arg Names Output ----------------------- // +// ========================================================================= // + +void BM_arg_names(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"}); +ADD_CASES(TC_ConsoleOut, + {{"^BM_arg_names/first:2/5/third:4 %console_report$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"}, + {"\"run_name\": \"BM_arg_names/first:2/5/third:4\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}}); + +// ========================================================================= // +// ------------------------ Testing Big Args Output ------------------------ // +// ========================================================================= // + +void BM_BigArgs(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_BigArgs)->RangeMultiplier(2)->Range(1U << 30U, 1U << 31U); +ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"}, + {"^BM_BigArgs/2147483648 %console_report$"}}); + +// ========================================================================= // +// ----------------------- Testing Complexity Output ----------------------- // +// ========================================================================= // + +void BM_Complexity_O1(benchmark::State& state) { + for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); + } + state.SetComplexityN(state.range(0)); +} +BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1); +SET_SUBSTITUTIONS({{"%bigOStr", "[ ]* %float \\([0-9]+\\)"}, + {"%RMS", "[ ]*[0-9]+ %"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"}, + {"^BM_Complexity_O1_RMS %RMS %RMS[ ]*$"}}); + +// ========================================================================= // +// ----------------------- Testing Aggregate Output ------------------------ // +// ========================================================================= // + +// Test that non-aggregate data is printed by default +void BM_Repeat(benchmark::State& state) { + for (auto _ : state) { + } +} +// need two repetitions min to be able to output any aggregate output +BENCHMARK(BM_Repeat)->Repetitions(2); +ADD_CASES(TC_ConsoleOut, + {{"^BM_Repeat/repeats:2 %console_report$"}, + {"^BM_Repeat/repeats:2 %console_report$"}, + {"^BM_Repeat/repeats:2_mean %console_time_only_report [ ]*2$"}, + {"^BM_Repeat/repeats:2_median %console_time_only_report [ ]*2$"}, + {"^BM_Repeat/repeats:2_stddev %console_time_only_report [ ]*2$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:2\"", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 
2,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:2\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:2_mean\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:2_median\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:2_stddev\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"}, + {"^\"BM_Repeat/repeats:2\",%csv_report$"}, + {"^\"BM_Repeat/repeats:2_mean\",%csv_report$"}, + {"^\"BM_Repeat/repeats:2_median\",%csv_report$"}, + {"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}}); +// but for two repetitions, mean and median is the same, so let's repeat.. +BENCHMARK(BM_Repeat)->Repetitions(3); +ADD_CASES(TC_ConsoleOut, + {{"^BM_Repeat/repeats:3 %console_report$"}, + {"^BM_Repeat/repeats:3 %console_report$"}, + {"^BM_Repeat/repeats:3 %console_report$"}, + {"^BM_Repeat/repeats:3_mean %console_time_only_report [ ]*3$"}, + {"^BM_Repeat/repeats:3_median %console_time_only_report [ ]*3$"}, + {"^BM_Repeat/repeats:3_stddev %console_time_only_report [ ]*3$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:3\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:3\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:3_mean\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:3_median\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:3_stddev\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, + {"\"run_type\": 
\"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"}, + {"^\"BM_Repeat/repeats:3\",%csv_report$"}, + {"^\"BM_Repeat/repeats:3\",%csv_report$"}, + {"^\"BM_Repeat/repeats:3_mean\",%csv_report$"}, + {"^\"BM_Repeat/repeats:3_median\",%csv_report$"}, + {"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}}); +// median differs between even/odd number of repetitions, so just to be sure +BENCHMARK(BM_Repeat)->Repetitions(4); +ADD_CASES(TC_ConsoleOut, + {{"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4_mean %console_time_only_report [ ]*4$"}, + {"^BM_Repeat/repeats:4_median %console_time_only_report [ ]*4$"}, + {"^BM_Repeat/repeats:4_stddev %console_time_only_report [ ]*4$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 4,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 4,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 4,$", MR_Next}, + {"\"repetition_index\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 4,$", MR_Next}, + {"\"repetition_index\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:4_mean\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 4,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"iterations\": 4,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:4_median\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 4,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"iterations\": 4,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:4_stddev\",$"}, + {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 4,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"iterations\": 4,$", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4_mean\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4_median\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4_stddev\",%csv_report$"}}); + +// Test that a non-repeated test still prints non-aggregate results even when +// only-aggregate reports have been requested +void BM_RepeatOnce(benchmark::State& state) { + for (auto _ : state) { + } 
+} +BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly(); +ADD_CASES(TC_ConsoleOut, {{"^BM_RepeatOnce/repeats:1 %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"}, + {"\"run_name\": \"BM_RepeatOnce/repeats:1\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}}); + +// Test that non-aggregate data is not reported +void BM_SummaryRepeat(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly(); +ADD_CASES( + TC_ConsoleOut, + {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, + {"^BM_SummaryRepeat/repeats:3_mean %console_time_only_report [ ]*3$"}, + {"^BM_SummaryRepeat/repeats:3_median %console_time_only_report [ ]*3$"}, + {"^BM_SummaryRepeat/repeats:3_stddev %console_time_only_report [ ]*3$"}}); +ADD_CASES(TC_JSONOut, + {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, + {"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"}, + {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"}, + {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"}, + {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}}); +ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, + {"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"}, + {"^\"BM_SummaryRepeat/repeats:3_median\",%csv_report$"}, + {"^\"BM_SummaryRepeat/repeats:3_stddev\",%csv_report$"}}); + +// Test that non-aggregate data is not displayed. +// NOTE: this test is kinda bad. we are only testing the display output. +// But we don't check that the file output still contains everything... 
+void BM_SummaryDisplay(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_SummaryDisplay)->Repetitions(2)->DisplayAggregatesOnly(); +ADD_CASES( + TC_ConsoleOut, + {{".*BM_SummaryDisplay/repeats:2 ", MR_Not}, + {"^BM_SummaryDisplay/repeats:2_mean %console_time_only_report [ ]*2$"}, + {"^BM_SummaryDisplay/repeats:2_median %console_time_only_report [ ]*2$"}, + {"^BM_SummaryDisplay/repeats:2_stddev %console_time_only_report [ ]*2$"}}); +ADD_CASES(TC_JSONOut, + {{".*BM_SummaryDisplay/repeats:2 ", MR_Not}, + {"\"name\": \"BM_SummaryDisplay/repeats:2_mean\",$"}, + {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}, + {"\"name\": \"BM_SummaryDisplay/repeats:2_median\",$"}, + {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}, + {"\"name\": \"BM_SummaryDisplay/repeats:2_stddev\",$"}, + {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}}); +ADD_CASES(TC_CSVOut, + {{".*BM_SummaryDisplay/repeats:2 ", MR_Not}, + {"^\"BM_SummaryDisplay/repeats:2_mean\",%csv_report$"}, + {"^\"BM_SummaryDisplay/repeats:2_median\",%csv_report$"}, + {"^\"BM_SummaryDisplay/repeats:2_stddev\",%csv_report$"}}); + +// Test repeats with custom time unit. 
+void BM_RepeatTimeUnit(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+}
+BENCHMARK(BM_RepeatTimeUnit)
+    ->Repetitions(3)
+    ->ReportAggregatesOnly()
+    ->Unit(benchmark::kMicrosecond);
+ADD_CASES(
+    TC_ConsoleOut,
+    {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
+     {"^BM_RepeatTimeUnit/repeats:3_mean %console_us_time_only_report [ ]*3$"},
+     {"^BM_RepeatTimeUnit/repeats:3_median %console_us_time_only_report [ "
+      "]*3$"},
+     {"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_time_only_report [ "
+      "]*3$"}});
+ADD_CASES(TC_JSONOut,
+          {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
+           {"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
+           {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
+           {"\"run_type\": \"aggregate\",$", MR_Next},
+           {"\"repetitions\": 3,$", MR_Next},
+           {"\"threads\": 1,$", MR_Next},
+           {"\"aggregate_name\": \"mean\",$", MR_Next},
+           {"\"iterations\": 3,$", MR_Next},
+           {"\"time_unit\": \"us\",?$"},
+           {"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"},
+           {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
+           {"\"run_type\": \"aggregate\",$", MR_Next},
+           {"\"repetitions\": 3,$", MR_Next},
+           {"\"threads\": 1,$", MR_Next},
+           {"\"aggregate_name\": \"median\",$", MR_Next},
+           {"\"iterations\": 3,$", MR_Next},
+           {"\"time_unit\": \"us\",?$"},
+           {"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"},
+           {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
+           {"\"run_type\": \"aggregate\",$", MR_Next},
+           {"\"repetitions\": 3,$", MR_Next},
+           {"\"threads\": 1,$", MR_Next},
+           {"\"aggregate_name\": \"stddev\",$", MR_Next},
+           {"\"iterations\": 3,$", MR_Next},
+           {"\"time_unit\": \"us\",?$"}});
+ADD_CASES(TC_CSVOut,
+          {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
+           {"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"},
+           {"^\"BM_RepeatTimeUnit/repeats:3_median\",%csv_us_report$"},
+           {"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}});
+
+// ========================================================================= //
+// -------------------- Testing user-provided statistics ------------------- //
+// ========================================================================= //
+
+const auto UserStatistics = [](const std::vector<double>& v) {
+  return v.back();
+};
+void BM_UserStats(benchmark::State& state) {
+  for (auto _ : state) {
+    state.SetIterationTime(150 / 10e8);
+  }
+}
+// clang-format off
+BENCHMARK(BM_UserStats)
+    ->Repetitions(3)
+    ->Iterations(5)
+    ->UseManualTime()
+    ->ComputeStatistics("", UserStatistics);
+// clang-format on
+
+// Check that the user-provided statistic is calculated, and that it comes
+// after the default ones.
+// The empty string as the name is intentional: it sorts before anything else.
+ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
+                           "]* 150 ns %time [ ]*5$"},
+                          {"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
+                           "]* 150 ns %time [ ]*5$"},
+                          {"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
+                           "]* 150 ns %time [ ]*5$"},
+                          {"^BM_UserStats/iterations:5/repeats:3/"
+                           "manual_time_mean [ ]* 150 ns %time [ ]*3$"},
+                          {"^BM_UserStats/iterations:5/repeats:3/"
+                           "manual_time_median [ ]* 150 ns %time [ ]*3$"},
+                          {"^BM_UserStats/iterations:5/repeats:3/"
+                           "manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"},
+                          {"^BM_UserStats/iterations:5/repeats:3/manual_time_ "
+                           "[ ]* 150 ns %time [ ]*3$"}});
+ADD_CASES(
+    TC_JSONOut,
+    {{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
+     {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
+      MR_Next},
+     {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": 5,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"}, + {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": 5,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"}, + {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": 5,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"}, + {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"}, + {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"}, + {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"}, + {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}}); +ADD_CASES( + TC_CSVOut, + {{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"}, + {"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"}, + {"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"}, + {"^\"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",%csv_report$"}, + {"^\"BM_UserStats/iterations:5/repeats:3/" + "manual_time_median\",%csv_report$"}, + {"^\"BM_UserStats/iterations:5/repeats:3/" + "manual_time_stddev\",%csv_report$"}, + {"^\"BM_UserStats/iterations:5/repeats:3/manual_time_\",%csv_report$"}}); + +// ========================================================================= // +// ------------------------- Testing StrEscape JSON ------------------------ // +// ========================================================================= // +#if 0 // enable when csv testing code correctly handles multi-line fields +void 
BM_JSON_Format(benchmark::State& state) {
+  state.SkipWithError("val\b\f\n\r\t\\\"with\"es,capes");
+  for (auto _ : state) {
+  }
+}
+BENCHMARK(BM_JSON_Format);
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_JSON_Format\",$"},
+                       {"\"run_name\": \"BM_JSON_Format\",$", MR_Next},
+                       {"\"run_type\": \"iteration\",$", MR_Next},
+                       {"\"repetitions\": 0,$", MR_Next},
+                       {"\"repetition_index\": 0,$", MR_Next},
+                       {"\"threads\": 1,$", MR_Next},
+                       {"\"error_occurred\": true,$", MR_Next},
+                       {R"("error_message": "val\\b\\f\\n\\r\\t\\\\\\"with\\"es,capes",$)", MR_Next}});
+#endif
+// ========================================================================= //
+// -------------------------- Testing CsvEscape ---------------------------- //
+// ========================================================================= //
+
+void BM_CSV_Format(benchmark::State& state) {
+  state.SkipWithError("\"freedom\"");
+  for (auto _ : state) {
+  }
+}
+BENCHMARK(BM_CSV_Format);
+ADD_CASES(TC_CSVOut, {{"^\"BM_CSV_Format\",,,,,,,,true,\"\"\"freedom\"\"\"$"}});
+
+// ========================================================================= //
+// --------------------------- TEST CASES END ------------------------------ //
+// ========================================================================= //
+
+int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
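BM_CSV_Format above pins down the CSV escaping rule: the field is wrapped in double quotes and each embedded double quote is doubled, so "freedom" round-trips as """freedom""". A standalone sketch of that rule; CsvQuote is a hypothetical helper, not the library's internal escaping code:

#include <iostream>
#include <string>

// Wrap a CSV field in quotes, doubling any embedded double quotes.
std::string CsvQuote(const std::string& field) {
  std::string out = "\"";
  for (char c : field) {
    if (c == '"') out += '"';  // escape by doubling
    out += c;
  }
  out += '"';
  return out;
}

int main() {
  std::cout << CsvQuote("\"freedom\"") << '\n';  // prints """freedom"""
}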
diff --git a/benchmarks/thirdparty/benchmark/test/skip_with_error_test.cc b/benchmarks/thirdparty/benchmark/test/skip_with_error_test.cc
new file mode 100755
index 0000000000..97a2e3c03b
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/skip_with_error_test.cc
@@ -0,0 +1,195 @@
+
+#undef NDEBUG
+#include <cassert>
+#include <vector>
+
+#include "../src/check.h"  // NOTE: check.h is for internal use only!
+#include "benchmark/benchmark.h"
+
+namespace {
+
+class TestReporter : public benchmark::ConsoleReporter {
+ public:
+  virtual bool ReportContext(const Context& context) {
+    return ConsoleReporter::ReportContext(context);
+  }
+
+  virtual void ReportRuns(const std::vector<Run>& report) {
+    all_runs_.insert(all_runs_.end(), begin(report), end(report));
+    ConsoleReporter::ReportRuns(report);
+  }
+
+  TestReporter() {}
+  virtual ~TestReporter() {}
+
+  mutable std::vector<Run> all_runs_;
+};
+
+struct TestCase {
+  std::string name;
+  bool error_occurred;
+  std::string error_message;
+
+  typedef benchmark::BenchmarkReporter::Run Run;
+
+  void CheckRun(Run const& run) const {
+    CHECK(name == run.benchmark_name())
+        << "expected " << name << " got " << run.benchmark_name();
+    CHECK(error_occurred == run.error_occurred);
+    CHECK(error_message == run.error_message);
+    if (error_occurred) {
+      // CHECK(run.iterations == 0);
+    } else {
+      CHECK(run.iterations != 0);
+    }
+  }
+};
+
+std::vector<TestCase> ExpectedResults;
+
+int AddCases(const char* base_name, std::initializer_list<TestCase> const& v) {
+  for (auto TC : v) {
+    TC.name = base_name + TC.name;
+    ExpectedResults.push_back(std::move(TC));
+  }
+  return 0;
+}
+
+#define CONCAT(x, y) CONCAT2(x, y)
+#define CONCAT2(x, y) x##y
+#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = AddCases(__VA_ARGS__)
+
+}  // end namespace
+
+void BM_error_no_running(benchmark::State& state) {
+  state.SkipWithError("error message");
+}
+BENCHMARK(BM_error_no_running);
+ADD_CASES("BM_error_no_running", {{"", true, "error message"}});
+
+void BM_error_before_running(benchmark::State& state) {
+  state.SkipWithError("error message");
+  while (state.KeepRunning()) {
+    assert(false);
+  }
+}
+BENCHMARK(BM_error_before_running);
+ADD_CASES("BM_error_before_running", {{"", true, "error message"}});
+
+void BM_error_before_running_batch(benchmark::State& state) {
+  state.SkipWithError("error message");
+  while (state.KeepRunningBatch(17)) {
+    assert(false);
+  }
+}
+BENCHMARK(BM_error_before_running_batch);
+ADD_CASES("BM_error_before_running_batch", {{"", true, "error message"}});
+
+void BM_error_before_running_range_for(benchmark::State& state) {
+  state.SkipWithError("error message");
+  for (auto _ : state) {
+    assert(false);
+  }
+}
+BENCHMARK(BM_error_before_running_range_for);
+ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}});
+
+void BM_error_during_running(benchmark::State& state) {
+  bool first_iter = true;
+  while (state.KeepRunning()) {
+    if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) {
+      assert(first_iter);
+      first_iter = false;
+      state.SkipWithError("error message");
+    } else {
+      state.PauseTiming();
+      state.ResumeTiming();
+    }
+  }
+}
+BENCHMARK(BM_error_during_running)->Arg(1)->Arg(2)->ThreadRange(1, 8);
+ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"},
+                                      {"/1/threads:2", true, "error message"},
+                                      {"/1/threads:4", true, "error message"},
+                                      {"/1/threads:8", true, "error message"},
+                                      {"/2/threads:1", false, ""},
+                                      {"/2/threads:2", false, ""},
+                                      {"/2/threads:4", false, ""},
+                                      {"/2/threads:8", false, ""}});
+
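SkipWithError() only marks the run as failed; it does not end the iteration loop itself, which is why the ranged-for test below has to break explicitly. A minimal sketch of the usual guard pattern; BM_guarded is an illustrative benchmark, not part of this patch:

#include "benchmark/benchmark.h"

static void BM_guarded(benchmark::State& state) {
  if (state.range(0) == 0) {
    state.SkipWithError("range must be non-zero");
    return;  // SkipWithError does not return for us; we stop iterating here
  }
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.range(0));
  }
}
BENCHMARK(BM_guarded)->Arg(0)->Arg(16);
BENCHMARK_MAIN();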
+void BM_error_during_running_ranged_for(benchmark::State& state) {
+  assert(state.max_iterations > 3 && "test requires at least a few iterations");
+  bool first_iter = true;
+  // NOTE: Users should not write the for loop explicitly.
+  for (auto It = state.begin(), End = state.end(); It != End; ++It) {
+    if (state.range(0) == 1) {
+      assert(first_iter);
+      first_iter = false;
+      state.SkipWithError("error message");
+      // Test the unfortunate but documented behavior that the ranged-for loop
+      // doesn't automatically terminate when SkipWithError is set.
+      assert(++It != End);
+      break;  // Required behavior
+    }
+  }
+}
+BENCHMARK(BM_error_during_running_ranged_for)->Arg(1)->Arg(2)->Iterations(5);
+ADD_CASES("BM_error_during_running_ranged_for",
+          {{"/1/iterations:5", true, "error message"},
+           {"/2/iterations:5", false, ""}});
+
+void BM_error_after_running(benchmark::State& state) {
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(state.iterations());
+  }
+  if (state.thread_index <= (state.threads / 2))
+    state.SkipWithError("error message");
+}
+BENCHMARK(BM_error_after_running)->ThreadRange(1, 8);
+ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"},
+                                     {"/threads:2", true, "error message"},
+                                     {"/threads:4", true, "error message"},
+                                     {"/threads:8", true, "error message"}});
+
+void BM_error_while_paused(benchmark::State& state) {
+  bool first_iter = true;
+  while (state.KeepRunning()) {
+    if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) {
+      assert(first_iter);
+      first_iter = false;
+      state.PauseTiming();
+      state.SkipWithError("error message");
+    } else {
+      state.PauseTiming();
+      state.ResumeTiming();
+    }
+  }
+}
+BENCHMARK(BM_error_while_paused)->Arg(1)->Arg(2)->ThreadRange(1, 8);
+ADD_CASES("BM_error_while_paused", {{"/1/threads:1", true, "error message"},
+                                    {"/1/threads:2", true, "error message"},
+                                    {"/1/threads:4", true, "error message"},
+                                    {"/1/threads:8", true, "error message"},
+                                    {"/2/threads:1", false, ""},
+                                    {"/2/threads:2", false, ""},
+                                    {"/2/threads:4", false, ""},
+                                    {"/2/threads:8", false, ""}});
+
+int main(int argc, char* argv[]) {
+  benchmark::Initialize(&argc, argv);
+
+  TestReporter test_reporter;
+  benchmark::RunSpecifiedBenchmarks(&test_reporter);
+
+  typedef benchmark::BenchmarkReporter::Run Run;
+  auto EB = ExpectedResults.begin();
+
+  for (Run const& run : test_reporter.all_runs_) {
+    assert(EB != ExpectedResults.end());
+    EB->CheckRun(run);
+    ++EB;
+  }
+  assert(EB == ExpectedResults.end());
+
+  return 0;
+}
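The threaded error tests above key their behavior off state.thread_index and state.threads, which is the standard way to make one benchmark body act differently per worker thread. A compact sketch using only that public surface; BM_per_thread is illustrative:

#include "benchmark/benchmark.h"

static void BM_per_thread(benchmark::State& state) {
  // state.thread_index runs from 0 to state.threads - 1; thread 0 is the
  // conventional place for shared setup.
  if (state.thread_index == 0) {
    // one-time setup could go here
  }
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.thread_index);
  }
}
BENCHMARK(BM_per_thread)->ThreadRange(1, 8);
BENCHMARK_MAIN();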
diff --git a/benchmarks/thirdparty/benchmark/test/state_assembly_test.cc b/benchmarks/thirdparty/benchmark/test/state_assembly_test.cc
new file mode 100755
index 0000000000..7ddbb3b2a9
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/state_assembly_test.cc
@@ -0,0 +1,68 @@
+#include <benchmark/benchmark.h>
+
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wreturn-type"
+#endif
+
+// clang-format off
+extern "C" {
+  extern int ExternInt;
+  benchmark::State& GetState();
+  void Fn();
+}
+// clang-format on
+
+using benchmark::State;
+
+// CHECK-LABEL: test_for_auto_loop:
+extern "C" int test_for_auto_loop() {
+  State& S = GetState();
+  int x = 42;
+  // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
+  // CHECK-NEXT: testq %rbx, %rbx
+  // CHECK-NEXT: je [[LOOP_END:.*]]
+
+  for (auto _ : S) {
+    // CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]:
+    // CHECK-GNU-NEXT: subq $1, %rbx
+    // CHECK-CLANG-NEXT: {{(addq \$1, %rax|incq %rax|addq \$-1, %rbx)}}
+    // CHECK-NEXT: jne .L[[LOOP_HEAD]]
+    benchmark::DoNotOptimize(x);
+  }
+  // CHECK: [[LOOP_END]]:
+  // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv
+
+  // CHECK: movl $101, %eax
+  // CHECK: ret
+  return 101;
+}
+
+// CHECK-LABEL: test_while_loop:
+extern "C" int test_while_loop() {
+  State& S = GetState();
+  int x = 42;
+
+  // CHECK: j{{(e|mp)}} .L[[LOOP_HEADER:[a-zA-Z0-9_]+]]
+  // CHECK-NEXT: .L[[LOOP_BODY:[a-zA-Z0-9_]+]]:
+  while (S.KeepRunning()) {
+    // CHECK-GNU-NEXT: subq $1, %[[IREG:[a-z]+]]
+    // CHECK-CLANG-NEXT: {{(addq \$-1,|decq)}} %[[IREG:[a-z]+]]
+    // CHECK: movq %[[IREG]], [[DEST:.*]]
+    benchmark::DoNotOptimize(x);
+  }
+  // CHECK-DAG: movq [[DEST]], %[[IREG]]
+  // CHECK-DAG: testq %[[IREG]], %[[IREG]]
+  // CHECK-DAG: jne .L[[LOOP_BODY]]
+  // CHECK-DAG: .L[[LOOP_HEADER]]:
+
+  // CHECK: cmpb $0
+  // CHECK-NEXT: jne .L[[LOOP_END:[a-zA-Z0-9_]+]]
+  // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
+
+  // CHECK: .L[[LOOP_END]]:
+  // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv
+
+  // CHECK: movl $101, %eax
+  // CHECK: ret
+  return 101;
+}
diff --git a/benchmarks/thirdparty/benchmark/test/statistics_gtest.cc b/benchmarks/thirdparty/benchmark/test/statistics_gtest.cc
new file mode 100755
index 0000000000..3ddc72dd7a
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/statistics_gtest.cc
@@ -0,0 +1,28 @@
+//===---------------------------------------------------------------------===//
+// statistics_test - Unit tests for src/statistics.cc
+//===---------------------------------------------------------------------===//
+
+#include "../src/statistics.h"
+#include "gtest/gtest.h"
+
+namespace {
+TEST(StatisticsTest, Mean) {
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({42, 42, 42, 42}), 42.0);
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 3, 4}), 2.5);
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 5, 10, 10, 14}), 7.0);
+}
+
+TEST(StatisticsTest, Median) {
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({42, 42, 42, 42}), 42.0);
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 3, 4}), 2.5);
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 5, 10, 10}), 5.0);
+}
+
+TEST(StatisticsTest, StdDev) {
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({101, 101, 101, 101}), 0.0);
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({1, 2, 3}), 1.0);
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({2.5, 2.4, 3.3, 4.2, 5.1}),
+                   1.151086443322134);
+}
+
+}  // end namespace
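The StdDev expectations above only hold for the sample (n-1) form: for {1, 2, 3} the mean is 2 and the squared deviations sum to 2, so dividing by n-1 = 2 gives variance 1 and a standard deviation of exactly 1. A reference sketch under that assumption; SampleStdDev is illustrative, not the library's StatisticsStdDev:

#include <cassert>
#include <cmath>
#include <vector>

// Sample standard deviation: divide the summed squared deviations by n - 1.
double SampleStdDev(const std::vector<double>& v) {
  double sum = 0.0;
  for (double x : v) sum += x;
  const double mean = sum / static_cast<double>(v.size());
  double ss = 0.0;
  for (double x : v) ss += (x - mean) * (x - mean);
  return std::sqrt(ss / (static_cast<double>(v.size()) - 1.0));
}

int main() {
  // Exact in double arithmetic: mean = 2, ss = 2, 2 / 2 = 1, sqrt(1) = 1.
  assert(SampleStdDev({1, 2, 3}) == 1.0);
  return 0;
}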
diff --git a/benchmarks/thirdparty/benchmark/test/string_util_gtest.cc b/benchmarks/thirdparty/benchmark/test/string_util_gtest.cc
new file mode 100755
index 0000000000..01bf155d8c
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/string_util_gtest.cc
@@ -0,0 +1,153 @@
+//===---------------------------------------------------------------------===//
+// string_util_test - Unit tests for src/string_util.cc
+//===---------------------------------------------------------------------===//
+
+#include "../src/string_util.h"
+#include "../src/internal_macros.h"
+#include "gtest/gtest.h"
+
+namespace {
+TEST(StringUtilTest, stoul) {
+  {
+    size_t pos = 0;
+    EXPECT_EQ(0ul, benchmark::stoul("0", &pos));
+    EXPECT_EQ(1ul, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(7ul, benchmark::stoul("7", &pos));
+    EXPECT_EQ(1ul, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(135ul, benchmark::stoul("135", &pos));
+    EXPECT_EQ(3ul, pos);
+  }
+#if ULONG_MAX == 0xFFFFFFFFul
+  {
+    size_t pos = 0;
+    EXPECT_EQ(0xFFFFFFFFul, benchmark::stoul("4294967295", &pos));
+    EXPECT_EQ(10ul, pos);
+  }
+#elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFul
+  {
+    size_t pos = 0;
+    EXPECT_EQ(0xFFFFFFFFFFFFFFFFul,
+              benchmark::stoul("18446744073709551615", &pos));
+    EXPECT_EQ(20ul, pos);
+  }
+#endif
+  {
+    size_t pos = 0;
+    EXPECT_EQ(10ul, benchmark::stoul("1010", &pos, 2));
+    EXPECT_EQ(4ul, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(520ul, benchmark::stoul("1010", &pos, 8));
+    EXPECT_EQ(4ul, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(1010ul, benchmark::stoul("1010", &pos, 10));
+    EXPECT_EQ(4ul, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(4112ul, benchmark::stoul("1010", &pos, 16));
+    EXPECT_EQ(4ul, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(0xBEEFul, benchmark::stoul("BEEF", &pos, 16));
+    EXPECT_EQ(4ul, pos);
+  }
+#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
+  {
+    ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument);
+  }
+#endif
+}
+
+TEST(StringUtilTest, stoi) {
+  {
+    size_t pos = 0;
+    EXPECT_EQ(0, benchmark::stoi("0", &pos));
+    EXPECT_EQ(1ul, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(-17, benchmark::stoi("-17", &pos));
+    EXPECT_EQ(3ul, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(1357, benchmark::stoi("1357", &pos));
+    EXPECT_EQ(4ul, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2));
+    EXPECT_EQ(4ul, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8));
+    EXPECT_EQ(4ul, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10));
+    EXPECT_EQ(4ul, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16));
+    EXPECT_EQ(4ul, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16));
+    EXPECT_EQ(4ul, pos);
+  }
+#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
+  {
+    ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument);
+  }
+#endif
+}
+
+TEST(StringUtilTest, stod) {
+  {
+    size_t pos = 0;
+    EXPECT_EQ(0.0, benchmark::stod("0", &pos));
+    EXPECT_EQ(1ul, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(-84.0, benchmark::stod("-84", &pos));
+    EXPECT_EQ(3ul, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(1234.0, benchmark::stod("1234", &pos));
+    EXPECT_EQ(4ul, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(1.5, benchmark::stod("1.5", &pos));
+    EXPECT_EQ(3ul, pos);
+  }
+  {
+    size_t pos = 0;
+    /* Note: exactly representable as double */
+    EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos));
+    EXPECT_EQ(8ul, pos);
+  }
+#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
+  {
+    ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument);
+  }
+#endif
+}
+
+}  // end namespace
diff --git a/benchmarks/thirdparty/benchmark/test/templated_fixture_test.cc b/benchmarks/thirdparty/benchmark/test/templated_fixture_test.cc
new file mode 100755
index 0000000000..fe9865cc77
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/templated_fixture_test.cc
@@ -0,0 +1,28 @@
+
+#include "benchmark/benchmark.h"
+
+#include <cassert>
+#include <memory>
+
+template <typename T>
+class MyFixture : public ::benchmark::Fixture {
+ public:
+  MyFixture() : data(0) {}
+
+  T data;
+};
+
+BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State& st) {
+  for (auto _ : st) {
+    data += 1;
+  }
+}
+
+BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State& st) {
+  for (auto _ : st) {
+    data += 1.0;
+  }
+}
+BENCHMARK_REGISTER_F(MyFixture, Bar);
+
+BENCHMARK_MAIN();
diff --git a/benchmarks/thirdparty/benchmark/test/user_counters_tabular_test.cc b/benchmarks/thirdparty/benchmark/test/user_counters_tabular_test.cc
new file mode 100755
index 0000000000..18373c0aac
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/test/user_counters_tabular_test.cc
@@ -0,0 +1,285 @@
+
+#undef NDEBUG
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// @todo: this checks the full output at once; the rule for
+// CounterSet1 was failing because it was not matching "^[-]+$".
+// @todo: check that the counters are vertically aligned.
+ADD_CASES( + TC_ConsoleOut, + { + // keeping these lines long improves readability, so: + // clang-format off + {"^[-]+$", MR_Next}, + {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Bat %s Baz %s Foo %s Frob %s Lob$", MR_Next}, + {"^[-]+$", MR_Next}, + {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, + {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, + {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, + {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, + {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, + {"^[-]+$", MR_Next}, + {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Baz %s Foo$", MR_Next}, + {"^[-]+$", MR_Next}, + {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^[-]+$", MR_Next}, + {"^Benchmark %s Time %s CPU %s Iterations %s Bat %s Baz %s Foo$", MR_Next}, + {"^[-]+$", MR_Next}, + {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", 
MR_Next}, + {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$"}, + // clang-format on + }); +ADD_CASES(TC_CSVOut, {{"%csv_header," + "\"Bar\",\"Bat\",\"Baz\",\"Foo\",\"Frob\",\"Lob\""}}); + +// ========================================================================= // +// ------------------------- Tabular Counters Output ----------------------- // +// ========================================================================= // + +void BM_Counters_Tabular(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters.insert({ + {"Foo", {1, bm::Counter::kAvgThreads}}, + {"Bar", {2, bm::Counter::kAvgThreads}}, + {"Baz", {4, bm::Counter::kAvgThreads}}, + {"Bat", {8, bm::Counter::kAvgThreads}}, + {"Frob", {16, bm::Counter::kAvgThreads}}, + {"Lob", {32, bm::Counter::kAvgThreads}}, + }); +} +BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"}, + {"\"run_name\": \"BM_Counters_Tabular/threads:%int\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/threads:%int\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckTabular(Results const& e) { + CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 1); + CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 2); + CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 4); + CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 8); + CHECK_COUNTER_VALUE(e, int, "Frob", EQ, 16); + CHECK_COUNTER_VALUE(e, int, "Lob", EQ, 32); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular); + +// ========================================================================= // +// -------------------- Tabular+Rate Counters Output ----------------------- // +// ========================================================================= // + +void BM_CounterRates_Tabular(benchmark::State& state) { + for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); + } + namespace bm = benchmark; + state.counters.insert({ + {"Foo", {1, bm::Counter::kAvgThreadsRate}}, + {"Bar", {2, bm::Counter::kAvgThreadsRate}}, + {"Baz", {4, bm::Counter::kAvgThreadsRate}}, + {"Bat", {8, bm::Counter::kAvgThreadsRate}}, + {"Frob", {16, bm::Counter::kAvgThreadsRate}}, + {"Lob", {32, bm::Counter::kAvgThreadsRate}}, + }); +} +BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"}, + {"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": 
\"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckTabularRate(Results const& e) { + double t = e.DurationCPUTime(); + CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32. / t, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int", + &CheckTabularRate); + +// ========================================================================= // +// ------------------------- Tabular Counters Output ----------------------- // +// ========================================================================= // + +// set only some of the counters +void BM_CounterSet0_Tabular(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters.insert({ + {"Foo", {10, bm::Counter::kAvgThreads}}, + {"Bar", {20, bm::Counter::kAvgThreads}}, + {"Baz", {40, bm::Counter::kAvgThreads}}, + }); +} +BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"}, + {"\"run_name\": \"BM_CounterSet0_Tabular/threads:%int\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet0_Tabular/threads:%int\",%csv_report," + "%float,,%float,%float,,"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckSet0(Results const& e) { + CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10); + CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 20); + CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40); +} +CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0); + +// again. 
+void BM_CounterSet1_Tabular(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters.insert({ + {"Foo", {15, bm::Counter::kAvgThreads}}, + {"Bar", {25, bm::Counter::kAvgThreads}}, + {"Baz", {45, bm::Counter::kAvgThreads}}, + }); +} +BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"}, + {"\"run_name\": \"BM_CounterSet1_Tabular/threads:%int\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet1_Tabular/threads:%int\",%csv_report," + "%float,,%float,%float,,"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckSet1(Results const& e) { + CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 15); + CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 25); + CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 45); +} +CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1); + +// ========================================================================= // +// ------------------------- Tabular Counters Output ----------------------- // +// ========================================================================= // + +// set only some of the counters, different set now. +void BM_CounterSet2_Tabular(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters.insert({ + {"Foo", {10, bm::Counter::kAvgThreads}}, + {"Bat", {30, bm::Counter::kAvgThreads}}, + {"Baz", {40, bm::Counter::kAvgThreads}}, + }); +} +BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"}, + {"\"run_name\": \"BM_CounterSet2_Tabular/threads:%int\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet2_Tabular/threads:%int\",%csv_report," + ",%float,%float,%float,,"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckSet2(Results const& e) { + CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10); + CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 30); + CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40); +} +CHECK_BENCHMARK_RESULTS("BM_CounterSet2_Tabular", &CheckSet2); + +// ========================================================================= // +// --------------------------- TEST CASES END ------------------------------ // +// ========================================================================= // + +int main(int argc, char* argv[]) { RunOutputTests(argc, argv); } diff --git a/benchmarks/thirdparty/benchmark/test/user_counters_test.cc b/benchmarks/thirdparty/benchmark/test/user_counters_test.cc new file mode 100755 
index 0000000000..5699f4f5e1 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/test/user_counters_test.cc @@ -0,0 +1,531 @@ + +#undef NDEBUG + +#include "benchmark/benchmark.h" +#include "output_test.h" + +// ========================================================================= // +// ---------------------- Testing Prologue Output -------------------------- // +// ========================================================================= // + +// clang-format off + +ADD_CASES(TC_ConsoleOut, + {{"^[-]+$", MR_Next}, + {"^Benchmark %s Time %s CPU %s Iterations UserCounters...$", MR_Next}, + {"^[-]+$", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}}); + +// clang-format on + +// ========================================================================= // +// ------------------------- Simple Counters Output ------------------------ // +// ========================================================================= // + +void BM_Counters_Simple(benchmark::State& state) { + for (auto _ : state) { + } + state.counters["foo"] = 1; + state.counters["bar"] = 2 * (double)state.iterations(); +} +BENCHMARK(BM_Counters_Simple); +ADD_CASES(TC_ConsoleOut, + {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"}, + {"\"run_name\": \"BM_Counters_Simple\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Simple\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckSimple(Results const& e) { + double its = e.NumIterations(); + CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1); + // check that the value of bar is within 0.1% of the expected value + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. 
* its, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple); + +// ========================================================================= // +// --------------------- Counters+Items+Bytes/s Output --------------------- // +// ========================================================================= // + +namespace { +int num_calls1 = 0; +} +void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) { + for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); + } + state.counters["foo"] = 1; + state.counters["bar"] = ++num_calls1; + state.SetBytesProcessed(364); + state.SetItemsProcessed(150); +} +BENCHMARK(BM_Counters_WithBytesAndItemsPSec); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_WithBytesAndItemsPSec %console_report " + "bar=%hrfloat bytes_per_second=%hrfloat/s " + "foo=%hrfloat items_per_second=%hrfloat/s$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"}, + {"\"run_name\": \"BM_Counters_WithBytesAndItemsPSec\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"bytes_per_second\": %float,$", MR_Next}, + {"\"foo\": %float,$", MR_Next}, + {"\"items_per_second\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_WithBytesAndItemsPSec\"," + "%csv_bytes_items_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckBytesAndItemsPSec(Results const& e) { + double t = e.DurationCPUTime(); // this (and not real time) is the time used + CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1); + CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1); + // check that the values are within 0.1% of the expected values + CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364. / t, 0.001); + CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150. 
/ t, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec", + &CheckBytesAndItemsPSec); + +// ========================================================================= // +// ------------------------- Rate Counters Output -------------------------- // +// ========================================================================= // + +void BM_Counters_Rate(benchmark::State& state) { + for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); + } + namespace bm = benchmark; + state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate}; + state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate}; +} +BENCHMARK(BM_Counters_Rate); +ADD_CASES( + TC_ConsoleOut, + {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"}, + {"\"run_name\": \"BM_Counters_Rate\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Rate\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckRate(Results const& e) { + double t = e.DurationCPUTime(); // this (and not real time) is the time used + // check that the values are within 0.1% of the expected values + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. 
/ t, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
+
+// ========================================================================= //
+// ----------------------- Inverted Counters Output ------------------------ //
+// ========================================================================= //
+
+void BM_Invert(benchmark::State& state) {
+  for (auto _ : state) {
+    // This test requires a non-zero CPU time to avoid divide-by-zero
+    benchmark::DoNotOptimize(state.iterations());
+  }
+  namespace bm = benchmark;
+  state.counters["foo"] = bm::Counter{0.0001, bm::Counter::kInvert};
+  state.counters["bar"] = bm::Counter{10000, bm::Counter::kInvert};
+}
+BENCHMARK(BM_Invert);
+ADD_CASES(TC_ConsoleOut,
+          {{"^BM_Invert %console_report bar=%hrfloatu foo=%hrfloatk$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Invert\",$"},
+                       {"\"run_name\": \"BM_Invert\",$", MR_Next},
+                       {"\"run_type\": \"iteration\",$", MR_Next},
+                       {"\"repetitions\": 0,$", MR_Next},
+                       {"\"repetition_index\": 0,$", MR_Next},
+                       {"\"threads\": 1,$", MR_Next},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"bar\": %float,$", MR_Next},
+                       {"\"foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Invert\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckInvert(Results const& e) {
+  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 10000, 0.0001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 0.0001, 0.0001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert);
+
+// ========================================================================= //
+// --------------------- InvertedRate Counters Output ---------------------- //
+// ========================================================================= //
+
+void BM_Counters_InvertedRate(benchmark::State& state) {
+  for (auto _ : state) {
+    // This test requires a non-zero CPU time to avoid divide-by-zero
+    benchmark::DoNotOptimize(state.iterations());
+  }
+  namespace bm = benchmark;
+  state.counters["foo"] =
+      bm::Counter{1, bm::Counter::kIsRate | bm::Counter::kInvert};
+  state.counters["bar"] =
+      bm::Counter{8192, bm::Counter::kIsRate | bm::Counter::kInvert};
+}
+BENCHMARK(BM_Counters_InvertedRate);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_InvertedRate %console_report "
+                           "bar=%hrfloats foo=%hrfloats$"}});
+ADD_CASES(TC_JSONOut,
+          {{"\"name\": \"BM_Counters_InvertedRate\",$"},
+           {"\"run_name\": \"BM_Counters_InvertedRate\",$", MR_Next},
+           {"\"run_type\": \"iteration\",$", MR_Next},
+           {"\"repetitions\": 0,$", MR_Next},
+           {"\"repetition_index\": 0,$", MR_Next},
+           {"\"threads\": 1,$", MR_Next},
+           {"\"iterations\": %int,$", MR_Next},
+           {"\"real_time\": %float,$", MR_Next},
+           {"\"cpu_time\": %float,$", MR_Next},
+           {"\"time_unit\": \"ns\",$", MR_Next},
+           {"\"bar\": %float,$", MR_Next},
+           {"\"foo\": %float$", MR_Next},
+           {"}", MR_Next}});
+ADD_CASES(TC_CSVOut,
+          {{"^\"BM_Counters_InvertedRate\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckInvertedRate(Results const& e) {
+  double t = e.DurationCPUTime();  // this (and not real time) is the time used
+  // check that the values are within 0.1% of the expected values
+  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, t, 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, t / 8192.0, 0.001);
+}
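+// Note: combining kIsRate with kInvert reports seconds per unit of the
+// counter value, which is why CheckInvertedRate above expects t for foo
+// (= t / 1) and t / 8192.0 for bar.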
+CHECK_BENCHMARK_RESULTS("BM_Counters_InvertedRate", &CheckInvertedRate); + +// ========================================================================= // +// ------------------------- Thread Counters Output ------------------------ // +// ========================================================================= // + +void BM_Counters_Threads(benchmark::State& state) { + for (auto _ : state) { + } + state.counters["foo"] = 1; + state.counters["bar"] = 2; +} +BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report " + "bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"}, + {"\"run_name\": \"BM_Counters_Threads/threads:%int\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES( + TC_CSVOut, + {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckThreads(Results const& e) { + CHECK_COUNTER_VALUE(e, int, "foo", EQ, e.NumThreads()); + CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2 * e.NumThreads()); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads); + +// ========================================================================= // +// ---------------------- ThreadAvg Counters Output ------------------------ // +// ========================================================================= // + +void BM_Counters_AvgThreads(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads}; + state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreads}; +} +BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int " + "%console_report bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"}, + {"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES( + TC_CSVOut, + {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckAvgThreads(Results const& e) { + CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1); + CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int", + &CheckAvgThreads); + +// ========================================================================= // +// ---------------------- ThreadAvg Counters Output ------------------------ // +// ========================================================================= // + +void 
BM_Counters_AvgThreadsRate(benchmark::State& state) { + for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); + } + namespace bm = benchmark; + state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate}; + state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreadsRate}; +} +BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int " + "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"}, + {"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/" + "threads:%int\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckAvgThreadsRate(Results const& e) { + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / e.DurationCPUTime(), 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / e.DurationCPUTime(), 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int", + &CheckAvgThreadsRate); + +// ========================================================================= // +// ------------------- IterationInvariant Counters Output ------------------ // +// ========================================================================= // + +void BM_Counters_IterationInvariant(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariant}; + state.counters["bar"] = bm::Counter{2, bm::Counter::kIsIterationInvariant}; +} +BENCHMARK(BM_Counters_IterationInvariant); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report " + "bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_IterationInvariant\",$"}, + {"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckIterationInvariant(Results const& e) { + double its = e.NumIterations(); + // check that the values are within 0.1% of the expected value + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. 
* its, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant", + &CheckIterationInvariant); + +// ========================================================================= // +// ----------------- IterationInvariantRate Counters Output ---------------- // +// ========================================================================= // + +void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) { + for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); + } + namespace bm = benchmark; + state.counters["foo"] = + bm::Counter{1, bm::Counter::kIsIterationInvariantRate}; + state.counters["bar"] = + bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant}; +} +BENCHMARK(BM_Counters_kIsIterationInvariantRate); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kIsIterationInvariantRate " + "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"}, + {"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kIsIterationInvariantRate\",%csv_report," + "%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckIsIterationInvariantRate(Results const& e) { + double its = e.NumIterations(); + double t = e.DurationCPUTime(); // this (and not real time) is the time used + // check that the values are within 0.1% of the expected values + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its * 1. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, its * 2. 
/ t, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate", + &CheckIsIterationInvariantRate); + +// ========================================================================= // +// ------------------- AvgIterations Counters Output ------------------ // +// ========================================================================= // + +void BM_Counters_AvgIterations(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterations}; + state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgIterations}; +} +BENCHMARK(BM_Counters_AvgIterations); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgIterations %console_report " + "bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_AvgIterations\",$"}, + {"\"run_name\": \"BM_Counters_AvgIterations\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckAvgIterations(Results const& e) { + double its = e.NumIterations(); + // check that the values are within 0.1% of the expected value + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations); + +// ========================================================================= // +// ----------------- AvgIterationsRate Counters Output ---------------- // +// ========================================================================= // + +void BM_Counters_kAvgIterationsRate(benchmark::State& state) { + for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); + } + namespace bm = benchmark; + state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate}; + state.counters["bar"] = + bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kAvgIterations}; +} +BENCHMARK(BM_Counters_kAvgIterationsRate); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kAvgIterationsRate " + "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"}, + {"\"run_name\": \"BM_Counters_kAvgIterationsRate\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 0,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kAvgIterationsRate\",%csv_report," + "%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckAvgIterationsRate(Results const& e) { + double its = e.NumIterations(); + double t = e.DurationCPUTime(); // this (and not real 
time) is the time used + // check that the values are within 0.1% of the expected values + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its / t, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_kAvgIterationsRate", + &CheckAvgIterationsRate); + +// ========================================================================= // +// --------------------------- TEST CASES END ------------------------------ // +// ========================================================================= // + +int main(int argc, char* argv[]) { RunOutputTests(argc, argv); } diff --git a/benchmarks/thirdparty/benchmark/test/user_counters_thousands_test.cc b/benchmarks/thirdparty/benchmark/test/user_counters_thousands_test.cc new file mode 100755 index 0000000000..21d8285ded --- /dev/null +++ b/benchmarks/thirdparty/benchmark/test/user_counters_thousands_test.cc @@ -0,0 +1,173 @@ + +#undef NDEBUG + +#include "benchmark/benchmark.h" +#include "output_test.h" + +// ========================================================================= // +// ------------------------ Thousands Customisation ------------------------ // +// ========================================================================= // + +void BM_Counters_Thousands(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters.insert({ + {"t0_1000000DefaultBase", + bm::Counter(1000 * 1000, bm::Counter::kDefaults)}, + {"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults, + benchmark::Counter::OneK::kIs1000)}, + {"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults, + benchmark::Counter::OneK::kIs1024)}, + {"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults, + benchmark::Counter::OneK::kIs1000)}, + {"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults, + benchmark::Counter::OneK::kIs1024)}, + }); +} +BENCHMARK(BM_Counters_Thousands)->Repetitions(2); +ADD_CASES( + TC_ConsoleOut, + { + {"^BM_Counters_Thousands/repeats:2 %console_report " + "t0_1000000DefaultBase=1000k " + "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k " + "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"}, + {"^BM_Counters_Thousands/repeats:2 %console_report " + "t0_1000000DefaultBase=1000k " + "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k " + "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"}, + {"^BM_Counters_Thousands/repeats:2_mean %console_report " + "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k " + "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k " + "t4_1048576Base1024=1024k$"}, + {"^BM_Counters_Thousands/repeats:2_median %console_report " + "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k " + "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k " + "t4_1048576Base1024=1024k$"}, + {"^BM_Counters_Thousands/repeats:2_stddev %console_time_only_report [ " + "]*2 t0_1000000DefaultBase=0 t1_1000000Base1000=0 " + "t2_1000000Base1024=0 t3_1048576Base1000=0 t4_1048576Base1024=0$"}, + }); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"}, + {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + 
{"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next}, + {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"}, + {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next}, + {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Thousands/repeats:2_mean\",$"}, + {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next}, + {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"}, + {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next}, + {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"}, + {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"t0_1000000DefaultBase\": 0\\.(0)*e\\+(0)*,$", MR_Next}, + {"\"t1_1000000Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next}, + {"\"t2_1000000Base1024\": 0\\.(0)*e\\+(0)*,$", MR_Next}, + {"\"t3_1048576Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next}, + {"\"t4_1048576Base1024\": 0\\.(0)*e\\+(0)*$", MR_Next}, + {"}", MR_Next}}); + +ADD_CASES( + 
TC_CSVOut,
+    {{"^\"BM_Counters_Thousands/"
+      "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
+      "0)*6,1\\.04858e\\+(0)*6$"},
+     {"^\"BM_Counters_Thousands/"
+      "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
+      "0)*6,1\\.04858e\\+(0)*6$"},
+     {"^\"BM_Counters_Thousands/"
+      "repeats:2_mean\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
+      "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
+     {"^\"BM_Counters_Thousands/"
+      "repeats:2_median\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
+      "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
+     {"^\"BM_Counters_Thousands/repeats:2_stddev\",%csv_report,0,0,0,0,0$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckThousands(Results const& e) {
+  if (e.name != "BM_Counters_Thousands/repeats:2")
+    return;  // Do not check the aggregates!
+
+  // check that the values are within 0.01% of the expected values
+  CHECK_FLOAT_COUNTER_VALUE(e, "t0_1000000DefaultBase", EQ, 1000 * 1000,
+                            0.0001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "t1_1000000Base1000", EQ, 1000 * 1000, 0.0001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "t2_1000000Base1024", EQ, 1000 * 1000, 0.0001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "t3_1048576Base1000", EQ, 1024 * 1024, 0.0001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "t4_1048576Base1024", EQ, 1024 * 1024, 0.0001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_Thousands", &CheckThousands);
+
+// ========================================================================= //
+// --------------------------- TEST CASES END ------------------------------ //
+// ========================================================================= //
+
+int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
diff --git a/benchmarks/thirdparty/benchmark/tools/compare.py b/benchmarks/thirdparty/benchmark/tools/compare.py
new file mode 100755
index 0000000000..bd01be57cd
--- /dev/null
+++ b/benchmarks/thirdparty/benchmark/tools/compare.py
@@ -0,0 +1,416 @@
+#!/usr/bin/env python
+
+"""
+compare.py - versatile benchmark output compare tool
+"""
+
+import unittest
+import argparse
+from argparse import ArgumentParser
+import sys
+import gbench
+from gbench import util, report
+from gbench.util import *
+
+
+def check_inputs(in1, in2, flags):
+    """
+    Perform checking on the user-provided inputs and diagnose any abnormalities
+    """
+    in1_kind, in1_err = classify_input_file(in1)
+    in2_kind, in2_err = classify_input_file(in2)
+    output_file = find_benchmark_flag('--benchmark_out=', flags)
+    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
+    if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
+        print(("WARNING: '--benchmark_out=%s' will be passed to both "
+               "benchmarks causing it to be overwritten") % output_file)
+    if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
+        print("WARNING: passing optional flags has no effect since both "
+              "inputs are JSON")
+    if output_type is not None and output_type != 'json':
+        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py'"
+               " is not supported.") % output_type)
+        sys.exit(1)
+
+
+def create_parser():
+    parser = ArgumentParser(
+        description='versatile benchmark output compare tool')
+
+    parser.add_argument(
+        '-a',
+        '--display_aggregates_only',
+        dest='display_aggregates_only',
+        action="store_true",
+        help="If there are repetitions, by default, we display everything - the"
+        " actual runs, and the aggregates computed. Sometimes, it is "
+        "desirable to only view the aggregates. 
E.g. when there are a lot " + "of repetitions. Do note that only the display is affected. " + "Internally, all the actual runs are still used, e.g. for U test.") + + parser.add_argument( + '--no-color', + dest='color', + default=True, + action="store_false", + help="Do not use colors in the terminal output" + ) + + utest = parser.add_argument_group() + utest.add_argument( + '--no-utest', + dest='utest', + default=True, + action="store_false", + help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS)) + alpha_default = 0.05 + utest.add_argument( + "--alpha", + dest='utest_alpha', + default=alpha_default, + type=float, + help=("significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") % + alpha_default) + + subparsers = parser.add_subparsers( + help='This tool has multiple modes of operation:', + dest='mode') + + parser_a = subparsers.add_parser( + 'benchmarks', + help='The most simple use-case, compare all the output of these two benchmarks') + baseline = parser_a.add_argument_group( + 'baseline', 'The benchmark baseline') + baseline.add_argument( + 'test_baseline', + metavar='test_baseline', + type=argparse.FileType('r'), + nargs=1, + help='A benchmark executable or JSON output file') + contender = parser_a.add_argument_group( + 'contender', 'The benchmark that will be compared against the baseline') + contender.add_argument( + 'test_contender', + metavar='test_contender', + type=argparse.FileType('r'), + nargs=1, + help='A benchmark executable or JSON output file') + parser_a.add_argument( + 'benchmark_options', + metavar='benchmark_options', + nargs=argparse.REMAINDER, + help='Arguments to pass when running benchmark executables') + + parser_b = subparsers.add_parser( + 'filters', help='Compare filter one with the filter two of benchmark') + baseline = parser_b.add_argument_group( + 'baseline', 'The benchmark baseline') + baseline.add_argument( + 'test', + metavar='test', + type=argparse.FileType('r'), + nargs=1, + help='A benchmark executable or JSON output file') + baseline.add_argument( + 'filter_baseline', + metavar='filter_baseline', + type=str, + nargs=1, + help='The first filter, that will be used as baseline') + contender = parser_b.add_argument_group( + 'contender', 'The benchmark that will be compared against the baseline') + contender.add_argument( + 'filter_contender', + metavar='filter_contender', + type=str, + nargs=1, + help='The second filter, that will be compared against the baseline') + parser_b.add_argument( + 'benchmark_options', + metavar='benchmark_options', + nargs=argparse.REMAINDER, + help='Arguments to pass when running benchmark executables') + + parser_c = subparsers.add_parser( + 'benchmarksfiltered', + help='Compare filter one of first benchmark with filter two of the second benchmark') + baseline = parser_c.add_argument_group( + 'baseline', 'The benchmark baseline') + baseline.add_argument( + 'test_baseline', + metavar='test_baseline', + type=argparse.FileType('r'), + nargs=1, + help='A 
benchmark executable or JSON output file') + baseline.add_argument( + 'filter_baseline', + metavar='filter_baseline', + type=str, + nargs=1, + help='The first filter, that will be used as baseline') + contender = parser_c.add_argument_group( + 'contender', 'The benchmark that will be compared against the baseline') + contender.add_argument( + 'test_contender', + metavar='test_contender', + type=argparse.FileType('r'), + nargs=1, + help='The second benchmark executable or JSON output file, that will be compared against the baseline') + contender.add_argument( + 'filter_contender', + metavar='filter_contender', + type=str, + nargs=1, + help='The second filter, that will be compared against the baseline') + parser_c.add_argument( + 'benchmark_options', + metavar='benchmark_options', + nargs=argparse.REMAINDER, + help='Arguments to pass when running benchmark executables') + + return parser + + +def main(): + # Parse the command line flags + parser = create_parser() + args, unknown_args = parser.parse_known_args() + if args.mode is None: + parser.print_help() + exit(1) + assert not unknown_args + benchmark_options = args.benchmark_options + + if args.mode == 'benchmarks': + test_baseline = args.test_baseline[0].name + test_contender = args.test_contender[0].name + filter_baseline = '' + filter_contender = '' + + # NOTE: if test_baseline == test_contender, you are analyzing the stdev + + description = 'Comparing %s to %s' % (test_baseline, test_contender) + elif args.mode == 'filters': + test_baseline = args.test[0].name + test_contender = args.test[0].name + filter_baseline = args.filter_baseline[0] + filter_contender = args.filter_contender[0] + + # NOTE: if filter_baseline == filter_contender, you are analyzing the + # stdev + + description = 'Comparing %s to %s (from %s)' % ( + filter_baseline, filter_contender, args.test[0].name) + elif args.mode == 'benchmarksfiltered': + test_baseline = args.test_baseline[0].name + test_contender = args.test_contender[0].name + filter_baseline = args.filter_baseline[0] + filter_contender = args.filter_contender[0] + + # NOTE: if test_baseline == test_contender and + # filter_baseline == filter_contender, you are analyzing the stdev + + description = 'Comparing %s (from %s) to %s (from %s)' % ( + filter_baseline, test_baseline, filter_contender, test_contender) + else: + # should never happen + print("Unrecognized mode of operation: '%s'" % args.mode) + parser.print_help() + exit(1) + + check_inputs(test_baseline, test_contender, benchmark_options) + + if args.display_aggregates_only: + benchmark_options += ['--benchmark_display_aggregates_only=true'] + + options_baseline = [] + options_contender = [] + + if filter_baseline and filter_contender: + options_baseline = ['--benchmark_filter=%s' % filter_baseline] + options_contender = ['--benchmark_filter=%s' % filter_contender] + + # Run the benchmarks and report the results + json1 = json1_orig = gbench.util.run_or_load_benchmark( + test_baseline, benchmark_options + options_baseline) + json2 = json2_orig = gbench.util.run_or_load_benchmark( + test_contender, benchmark_options + options_contender) + + # Now, filter the benchmarks so that the difference report can work + if filter_baseline and filter_contender: + replacement = '[%s vs. 
%s]' % (filter_baseline, filter_contender) + json1 = gbench.report.filter_benchmark( + json1_orig, filter_baseline, replacement) + json2 = gbench.report.filter_benchmark( + json2_orig, filter_contender, replacement) + + # Diff and output + output_lines = gbench.report.generate_difference_report( + json1, json2, args.display_aggregates_only, + args.utest, args.utest_alpha, args.color) + print(description) + for ln in output_lines: + print(ln) + + +class TestParser(unittest.TestCase): + def setUp(self): + self.parser = create_parser() + testInputs = os.path.join( + os.path.dirname( + os.path.realpath(__file__)), + 'gbench', + 'Inputs') + self.testInput0 = os.path.join(testInputs, 'test1_run1.json') + self.testInput1 = os.path.join(testInputs, 'test1_run2.json') + + def test_benchmarks_basic(self): + parsed = self.parser.parse_args( + ['benchmarks', self.testInput0, self.testInput1]) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) + self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertFalse(parsed.benchmark_options) + + def test_benchmarks_basic_without_utest(self): + parsed = self.parser.parse_args( + ['--no-utest', 'benchmarks', self.testInput0, self.testInput1]) + self.assertFalse(parsed.display_aggregates_only) + self.assertFalse(parsed.utest) + self.assertEqual(parsed.utest_alpha, 0.05) + self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertFalse(parsed.benchmark_options) + + def test_benchmarks_basic_display_aggregates_only(self): + parsed = self.parser.parse_args( + ['-a', 'benchmarks', self.testInput0, self.testInput1]) + self.assertTrue(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) + self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertFalse(parsed.benchmark_options) + + def test_benchmarks_basic_with_utest_alpha(self): + parsed = self.parser.parse_args( + ['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1]) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) + self.assertEqual(parsed.utest_alpha, 0.314) + self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertFalse(parsed.benchmark_options) + + def test_benchmarks_basic_without_utest_with_utest_alpha(self): + parsed = self.parser.parse_args( + ['--no-utest', '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1]) + self.assertFalse(parsed.display_aggregates_only) + self.assertFalse(parsed.utest) + self.assertEqual(parsed.utest_alpha, 0.314) + self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertFalse(parsed.benchmark_options) + + def test_benchmarks_with_remainder(self): + parsed = self.parser.parse_args( + ['benchmarks', self.testInput0, self.testInput1, 'd']) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) + self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + 
self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertEqual(parsed.benchmark_options, ['d']) + + def test_benchmarks_with_remainder_after_doubleminus(self): + parsed = self.parser.parse_args( + ['benchmarks', self.testInput0, self.testInput1, '--', 'e']) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) + self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertEqual(parsed.benchmark_options, ['e']) + + def test_filters_basic(self): + parsed = self.parser.parse_args( + ['filters', self.testInput0, 'c', 'd']) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) + self.assertEqual(parsed.mode, 'filters') + self.assertEqual(parsed.test[0].name, self.testInput0) + self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.filter_contender[0], 'd') + self.assertFalse(parsed.benchmark_options) + + def test_filters_with_remainder(self): + parsed = self.parser.parse_args( + ['filters', self.testInput0, 'c', 'd', 'e']) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) + self.assertEqual(parsed.mode, 'filters') + self.assertEqual(parsed.test[0].name, self.testInput0) + self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.filter_contender[0], 'd') + self.assertEqual(parsed.benchmark_options, ['e']) + + def test_filters_with_remainder_after_doubleminus(self): + parsed = self.parser.parse_args( + ['filters', self.testInput0, 'c', 'd', '--', 'f']) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) + self.assertEqual(parsed.mode, 'filters') + self.assertEqual(parsed.test[0].name, self.testInput0) + self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.filter_contender[0], 'd') + self.assertEqual(parsed.benchmark_options, ['f']) + + def test_benchmarksfiltered_basic(self): + parsed = self.parser.parse_args( + ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e']) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) + self.assertEqual(parsed.mode, 'benchmarksfiltered') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertEqual(parsed.filter_contender[0], 'e') + self.assertFalse(parsed.benchmark_options) + + def test_benchmarksfiltered_with_remainder(self): + parsed = self.parser.parse_args( + ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f']) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) + self.assertEqual(parsed.mode, 'benchmarksfiltered') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertEqual(parsed.filter_contender[0], 'e') + self.assertEqual(parsed.benchmark_options[0], 'f') + + def test_benchmarksfiltered_with_remainder_after_doubleminus(self): + parsed = self.parser.parse_args( + ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g']) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) + self.assertEqual(parsed.mode, 'benchmarksfiltered') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + 
self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertEqual(parsed.filter_contender[0], 'e') + self.assertEqual(parsed.benchmark_options[0], 'g') + + +if __name__ == '__main__': + # unittest.main() + main() + +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 +# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; +# kate: indent-mode python; remove-trailing-spaces modified; diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run1.json b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run1.json new file mode 100755 index 0000000000..601e327aef --- /dev/null +++ b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run1.json @@ -0,0 +1,119 @@ +{ + "context": { + "date": "2016-08-02 17:44:46", + "num_cpus": 4, + "mhz_per_cpu": 4228, + "cpu_scaling_enabled": false, + "library_build_type": "release" + }, + "benchmarks": [ + { + "name": "BM_SameTimes", + "iterations": 1000, + "real_time": 10, + "cpu_time": 10, + "time_unit": "ns" + }, + { + "name": "BM_2xFaster", + "iterations": 1000, + "real_time": 50, + "cpu_time": 50, + "time_unit": "ns" + }, + { + "name": "BM_2xSlower", + "iterations": 1000, + "real_time": 50, + "cpu_time": 50, + "time_unit": "ns" + }, + { + "name": "BM_1PercentFaster", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_1PercentSlower", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_10PercentFaster", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_10PercentSlower", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_100xSlower", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_100xFaster", + "iterations": 1000, + "real_time": 10000, + "cpu_time": 10000, + "time_unit": "ns" + }, + { + "name": "BM_10PercentCPUToTime", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_ThirdFaster", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "MyComplexityTest_BigO", + "run_name": "MyComplexityTest", + "run_type": "aggregate", + "aggregate_name": "BigO", + "cpu_coefficient": 4.2749856294592886e+00, + "real_coefficient": 6.4789275289789780e+00, + "big_o": "N", + "time_unit": "ns" + }, + { + "name": "MyComplexityTest_RMS", + "run_name": "MyComplexityTest", + "run_type": "aggregate", + "aggregate_name": "RMS", + "rms": 4.5097802512472874e-03 + }, + { + "name": "BM_NotBadTimeUnit", + "iterations": 1000, + "real_time": 0.4, + "cpu_time": 0.5, + "time_unit": "s" + }, + { + "name": "BM_DifferentTimeUnit", + "iterations": 1, + "real_time": 1, + "cpu_time": 1, + "time_unit": "s" + } + ] +} diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run2.json b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run2.json new file mode 100755 index 0000000000..3cbcf39b0c --- /dev/null +++ b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run2.json @@ -0,0 +1,119 @@ +{ + "context": { + "date": "2016-08-02 17:44:46", + "num_cpus": 4, + "mhz_per_cpu": 4228, + "cpu_scaling_enabled": false, + "library_build_type": "release" + }, + "benchmarks": [ + { + "name": "BM_SameTimes", + "iterations": 1000, + "real_time": 10, + "cpu_time": 10, + "time_unit": "ns" + }, + { + "name": 
"BM_2xFaster", + "iterations": 1000, + "real_time": 25, + "cpu_time": 25, + "time_unit": "ns" + }, + { + "name": "BM_2xSlower", + "iterations": 20833333, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_1PercentFaster", + "iterations": 1000, + "real_time": 98.9999999, + "cpu_time": 98.9999999, + "time_unit": "ns" + }, + { + "name": "BM_1PercentSlower", + "iterations": 1000, + "real_time": 100.9999999, + "cpu_time": 100.9999999, + "time_unit": "ns" + }, + { + "name": "BM_10PercentFaster", + "iterations": 1000, + "real_time": 90, + "cpu_time": 90, + "time_unit": "ns" + }, + { + "name": "BM_10PercentSlower", + "iterations": 1000, + "real_time": 110, + "cpu_time": 110, + "time_unit": "ns" + }, + { + "name": "BM_100xSlower", + "iterations": 1000, + "real_time": 1.0000e+04, + "cpu_time": 1.0000e+04, + "time_unit": "ns" + }, + { + "name": "BM_100xFaster", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_10PercentCPUToTime", + "iterations": 1000, + "real_time": 110, + "cpu_time": 90, + "time_unit": "ns" + }, + { + "name": "BM_ThirdFaster", + "iterations": 1000, + "real_time": 66.665, + "cpu_time": 66.664, + "time_unit": "ns" + }, + { + "name": "MyComplexityTest_BigO", + "run_name": "MyComplexityTest", + "run_type": "aggregate", + "aggregate_name": "BigO", + "cpu_coefficient": 5.6215779594361486e+00, + "real_coefficient": 5.6288314793554610e+00, + "big_o": "N", + "time_unit": "ns" + }, + { + "name": "MyComplexityTest_RMS", + "run_name": "MyComplexityTest", + "run_type": "aggregate", + "aggregate_name": "RMS", + "rms": 3.3128901852342174e-03 + }, + { + "name": "BM_NotBadTimeUnit", + "iterations": 1000, + "real_time": 0.04, + "cpu_time": 0.6, + "time_unit": "s" + }, + { + "name": "BM_DifferentTimeUnit", + "iterations": 1, + "real_time": 1, + "cpu_time": 1, + "time_unit": "ns" + } + ] +} diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test2_run.json b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test2_run.json new file mode 100755 index 0000000000..15bc698030 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test2_run.json @@ -0,0 +1,81 @@ +{ + "context": { + "date": "2016-08-02 17:44:46", + "num_cpus": 4, + "mhz_per_cpu": 4228, + "cpu_scaling_enabled": false, + "library_build_type": "release" + }, + "benchmarks": [ + { + "name": "BM_Hi", + "iterations": 1234, + "real_time": 42, + "cpu_time": 24, + "time_unit": "ms" + }, + { + "name": "BM_Zero", + "iterations": 1000, + "real_time": 10, + "cpu_time": 10, + "time_unit": "ns" + }, + { + "name": "BM_Zero/4", + "iterations": 4000, + "real_time": 40, + "cpu_time": 40, + "time_unit": "ns" + }, + { + "name": "Prefix/BM_Zero", + "iterations": 2000, + "real_time": 20, + "cpu_time": 20, + "time_unit": "ns" + }, + { + "name": "Prefix/BM_Zero/3", + "iterations": 3000, + "real_time": 30, + "cpu_time": 30, + "time_unit": "ns" + }, + { + "name": "BM_One", + "iterations": 5000, + "real_time": 5, + "cpu_time": 5, + "time_unit": "ns" + }, + { + "name": "BM_One/4", + "iterations": 2000, + "real_time": 20, + "cpu_time": 20, + "time_unit": "ns" + }, + { + "name": "Prefix/BM_One", + "iterations": 1000, + "real_time": 10, + "cpu_time": 10, + "time_unit": "ns" + }, + { + "name": "Prefix/BM_One/3", + "iterations": 1500, + "real_time": 15, + "cpu_time": 15, + "time_unit": "ns" + }, + { + "name": "BM_Bye", + "iterations": 5321, + "real_time": 11, + "cpu_time": 63, + "time_unit": "ns" + } + ] +} diff --git 
a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test3_run0.json b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test3_run0.json new file mode 100755 index 0000000000..49f8b06143 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test3_run0.json @@ -0,0 +1,65 @@ +{ + "context": { + "date": "2016-08-02 17:44:46", + "num_cpus": 4, + "mhz_per_cpu": 4228, + "cpu_scaling_enabled": false, + "library_build_type": "release" + }, + "benchmarks": [ + { + "name": "BM_One", + "run_type": "aggregate", + "iterations": 1000, + "real_time": 10, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_Two", + "iterations": 1000, + "real_time": 9, + "cpu_time": 90, + "time_unit": "ns" + }, + { + "name": "BM_Two", + "iterations": 1000, + "real_time": 8, + "cpu_time": 86, + "time_unit": "ns" + }, + { + "name": "short", + "run_type": "aggregate", + "iterations": 1000, + "real_time": 8, + "cpu_time": 80, + "time_unit": "ns" + }, + { + "name": "short", + "run_type": "aggregate", + "iterations": 1000, + "real_time": 8, + "cpu_time": 77, + "time_unit": "ns" + }, + { + "name": "medium", + "run_type": "iteration", + "iterations": 1000, + "real_time": 8, + "cpu_time": 80, + "time_unit": "ns" + }, + { + "name": "medium", + "run_type": "iteration", + "iterations": 1000, + "real_time": 9, + "cpu_time": 82, + "time_unit": "ns" + } + ] +} diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test3_run1.json b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test3_run1.json new file mode 100755 index 0000000000..acc5ba17ae --- /dev/null +++ b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test3_run1.json @@ -0,0 +1,65 @@ +{ + "context": { + "date": "2016-08-02 17:44:46", + "num_cpus": 4, + "mhz_per_cpu": 4228, + "cpu_scaling_enabled": false, + "library_build_type": "release" + }, + "benchmarks": [ + { + "name": "BM_One", + "iterations": 1000, + "real_time": 9, + "cpu_time": 110, + "time_unit": "ns" + }, + { + "name": "BM_Two", + "run_type": "aggregate", + "iterations": 1000, + "real_time": 10, + "cpu_time": 89, + "time_unit": "ns" + }, + { + "name": "BM_Two", + "iterations": 1000, + "real_time": 7, + "cpu_time": 72, + "time_unit": "ns" + }, + { + "name": "short", + "run_type": "aggregate", + "iterations": 1000, + "real_time": 7, + "cpu_time": 75, + "time_unit": "ns" + }, + { + "name": "short", + "run_type": "aggregate", + "iterations": 762, + "real_time": 4.54, + "cpu_time": 66.6, + "time_unit": "ns" + }, + { + "name": "short", + "run_type": "iteration", + "iterations": 1000, + "real_time": 800, + "cpu_time": 1, + "time_unit": "ns" + }, + { + "name": "medium", + "run_type": "iteration", + "iterations": 1200, + "real_time": 5, + "cpu_time": 53, + "time_unit": "ns" + } + ] +} diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/__init__.py b/benchmarks/thirdparty/benchmark/tools/gbench/__init__.py new file mode 100755 index 0000000000..fce1a1acfb --- /dev/null +++ b/benchmarks/thirdparty/benchmark/tools/gbench/__init__.py @@ -0,0 +1,8 @@ +"""Google Benchmark tooling""" + +__author__ = 'Eric Fiselier' +__email__ = 'eric@efcs.ca' +__versioninfo__ = (0, 5, 0) +__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev' + +__all__ = [] diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/report.py b/benchmarks/thirdparty/benchmark/tools/gbench/report.py new file mode 100755 index 0000000000..5bd3a8d85d --- /dev/null +++ b/benchmarks/thirdparty/benchmark/tools/gbench/report.py @@ -0,0 +1,541 @@ +import unittest +"""report.py - Utilities for reporting statistics 
about benchmark results +""" +import os +import re +import copy + +from scipy.stats import mannwhitneyu + + +class BenchmarkColor(object): + def __init__(self, name, code): + self.name = name + self.code = code + + def __repr__(self): + return '%s%r' % (self.__class__.__name__, + (self.name, self.code)) + + def __format__(self, format): + return self.code + + +# Benchmark Colors Enumeration +BC_NONE = BenchmarkColor('NONE', '') +BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m') +BC_CYAN = BenchmarkColor('CYAN', '\033[96m') +BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m') +BC_OKGREEN = BenchmarkColor('OKGREEN', '\033[32m') +BC_HEADER = BenchmarkColor('HEADER', '\033[92m') +BC_WARNING = BenchmarkColor('WARNING', '\033[93m') +BC_WHITE = BenchmarkColor('WHITE', '\033[97m') +BC_FAIL = BenchmarkColor('FAIL', '\033[91m') +BC_ENDC = BenchmarkColor('ENDC', '\033[0m') +BC_BOLD = BenchmarkColor('BOLD', '\033[1m') +BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m') + +UTEST_MIN_REPETITIONS = 2 +UTEST_OPTIMAL_REPETITIONS = 9 # Lowest reasonable number, More is better. +UTEST_COL_NAME = "_pvalue" + + +def color_format(use_color, fmt_str, *args, **kwargs): + """ + Return the result of 'fmt_str.format(*args, **kwargs)' after transforming + 'args' and 'kwargs' according to the value of 'use_color'. If 'use_color' + is False then all color codes in 'args' and 'kwargs' are replaced with + the empty string. + """ + assert use_color is True or use_color is False + if not use_color: + args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE + for arg in args] + kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE + for key, arg in kwargs.items()} + return fmt_str.format(*args, **kwargs) + + +def find_longest_name(benchmark_list): + """ + Return the length of the longest benchmark name in a given list of + benchmark JSON objects + """ + longest_name = 1 + for bc in benchmark_list: + if len(bc['name']) > longest_name: + longest_name = len(bc['name']) + return longest_name + + +def calculate_change(old_val, new_val): + """ + Return a float representing the decimal change between old_val and new_val. + """ + if old_val == 0 and new_val == 0: + return 0.0 + if old_val == 0: + return float(new_val - old_val) / (float(old_val + new_val) / 2) + return float(new_val - old_val) / abs(old_val) + + +def filter_benchmark(json_orig, family, replacement=""): + """ + Apply a filter to the json, and only leave the 'family' of benchmarks. + """ + regex = re.compile(family) + filtered = {} + filtered['benchmarks'] = [] + for be in json_orig['benchmarks']: + if not regex.search(be['name']): + continue + filteredbench = copy.deepcopy(be) # Do NOT modify the old name! + filteredbench['name'] = regex.sub(replacement, filteredbench['name']) + filtered['benchmarks'].append(filteredbench) + return filtered + + +def get_unique_benchmark_names(json): + """ + While *keeping* the order, give all the unique 'names' used for benchmarks. + """ + seen = set() + uniqued = [x['name'] for x in json['benchmarks'] + if x['name'] not in seen and + (seen.add(x['name']) or True)] + return uniqued + + +def intersect(list1, list2): + """ + Given two lists, get a new list consisting of the elements only contained + in *both of the input lists*, while preserving the ordering. 
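+    Illustrative example (an added sketch, not from the original file; the
+    letter values are made up): intersect(['a', 'b', 'c'], ['b', 'c', 'd'])
+    returns ['b', 'c'].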
+ """ + return [x for x in list1 if x in list2] + + +def is_potentially_comparable_benchmark(x): + return ('time_unit' in x and 'real_time' in x and 'cpu_time' in x) + + +def partition_benchmarks(json1, json2): + """ + While preserving the ordering, find benchmarks with the same names in + both of the inputs, and group them. + (i.e. partition/filter into groups with common name) + """ + json1_unique_names = get_unique_benchmark_names(json1) + json2_unique_names = get_unique_benchmark_names(json2) + names = intersect(json1_unique_names, json2_unique_names) + partitions = [] + for name in names: + time_unit = None + # Pick the time unit from the first entry of the lhs benchmark. + # We should be careful not to crash with unexpected input. + for x in json1['benchmarks']: + if (x['name'] == name and is_potentially_comparable_benchmark(x)): + time_unit = x['time_unit'] + break + if time_unit is None: + continue + # Filter by name and time unit. + # All the repetitions are assumed to be comparable. + lhs = [x for x in json1['benchmarks'] if x['name'] == name and + x['time_unit'] == time_unit] + rhs = [x for x in json2['benchmarks'] if x['name'] == name and + x['time_unit'] == time_unit] + partitions.append([lhs, rhs]) + return partitions + + +def extract_field(partition, field_name): + # The count of elements may be different. We want *all* of them. + lhs = [x[field_name] for x in partition[0]] + rhs = [x[field_name] for x in partition[1]] + return [lhs, rhs] + +def calc_utest(timings_cpu, timings_time): + min_rep_cnt = min(len(timings_time[0]), + len(timings_time[1]), + len(timings_cpu[0]), + len(timings_cpu[1])) + + # Does *everything* has at least UTEST_MIN_REPETITIONS repetitions? + if min_rep_cnt < UTEST_MIN_REPETITIONS: + return False, None, None + + time_pvalue = mannwhitneyu( + timings_time[0], timings_time[1], alternative='two-sided').pvalue + cpu_pvalue = mannwhitneyu( + timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue + + return (min_rep_cnt >= UTEST_OPTIMAL_REPETITIONS), cpu_pvalue, time_pvalue + +def print_utest(partition, utest_alpha, first_col_width, use_color=True): + def get_utest_color(pval): + return BC_FAIL if pval >= utest_alpha else BC_OKGREEN + + timings_time = extract_field(partition, 'real_time') + timings_cpu = extract_field(partition, 'cpu_time') + have_optimal_repetitions, cpu_pvalue, time_pvalue = calc_utest(timings_cpu, timings_time) + + # Check if we failed miserably with minimum required repetitions for utest + if not have_optimal_repetitions and cpu_pvalue is None and time_pvalue is None: + return [] + + dsc = "U Test, Repetitions: {} vs {}".format( + len(timings_cpu[0]), len(timings_cpu[1])) + dsc_color = BC_OKGREEN + + # We still got some results to show but issue a warning about it. + if not have_optimal_repetitions: + dsc_color = BC_WARNING + dsc += ". WARNING: Results unreliable! 
{}+ repetitions recommended.".format( + UTEST_OPTIMAL_REPETITIONS) + + special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}" + + last_name = partition[0][0]['name'] + return [color_format(use_color, + special_str, + BC_HEADER, + "{}{}".format(last_name, UTEST_COL_NAME), + first_col_width, + get_utest_color(time_pvalue), time_pvalue, + get_utest_color(cpu_pvalue), cpu_pvalue, + dsc_color, dsc, + endc=BC_ENDC)] + + +def generate_difference_report( + json1, + json2, + display_aggregates_only=False, + utest=False, + utest_alpha=0.05, + use_color=True): + """ + Calculate and report the difference between each test of two benchmarks + runs specified as 'json1' and 'json2'. + """ + assert utest is True or utest is False + first_col_width = find_longest_name(json1['benchmarks']) + + def find_test(name): + for b in json2['benchmarks']: + if b['name'] == name: + return b + return None + + first_col_width = max( + first_col_width, + len('Benchmark')) + first_col_width += len(UTEST_COL_NAME) + first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format( + 'Benchmark', 12 + first_col_width) + output_strs = [first_line, '-' * len(first_line)] + + partitions = partition_benchmarks(json1, json2) + for partition in partitions: + # Careful, we may have different repetition count. + for i in range(min(len(partition[0]), len(partition[1]))): + bn = partition[0][i] + other_bench = partition[1][i] + + # *If* we were asked to only display aggregates, + # and if it is non-aggregate, then skip it. + if display_aggregates_only and 'run_type' in bn and 'run_type' in other_bench: + assert bn['run_type'] == other_bench['run_type'] + if bn['run_type'] != 'aggregate': + continue + + fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}" + + def get_color(res): + if res > 0.05: + return BC_FAIL + elif res > -0.07: + return BC_WHITE + else: + return BC_CYAN + + tres = calculate_change(bn['real_time'], other_bench['real_time']) + cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time']) + output_strs += [color_format(use_color, + fmt_str, + BC_HEADER, + bn['name'], + first_col_width, + get_color(tres), + tres, + get_color(cpures), + cpures, + bn['real_time'], + other_bench['real_time'], + bn['cpu_time'], + other_bench['cpu_time'], + endc=BC_ENDC)] + + # After processing the whole partition, if requested, do the U test. 
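+        # Added annotation (not in the original source): print_utest() returns
+        # an empty list when either side has fewer than UTEST_MIN_REPETITIONS
+        # repetitions, so p-value rows are silently omitted for short runs.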
+ if utest: + output_strs += print_utest(partition, + utest_alpha=utest_alpha, + first_col_width=first_col_width, + use_color=use_color) + + return output_strs + + +############################################################################### +# Unit tests + + +class TestGetUniqueBenchmarkNames(unittest.TestCase): + def load_results(self): + import json + testInputs = os.path.join( + os.path.dirname( + os.path.realpath(__file__)), + 'Inputs') + testOutput = os.path.join(testInputs, 'test3_run0.json') + with open(testOutput, 'r') as f: + json = json.load(f) + return json + + def test_basic(self): + expect_lines = [ + 'BM_One', + 'BM_Two', + 'short', # These two are not sorted + 'medium', # These two are not sorted + ] + json = self.load_results() + output_lines = get_unique_benchmark_names(json) + print("\n") + print("\n".join(output_lines)) + self.assertEqual(len(output_lines), len(expect_lines)) + for i in range(0, len(output_lines)): + self.assertEqual(expect_lines[i], output_lines[i]) + + +class TestReportDifference(unittest.TestCase): + def load_results(self): + import json + testInputs = os.path.join( + os.path.dirname( + os.path.realpath(__file__)), + 'Inputs') + testOutput1 = os.path.join(testInputs, 'test1_run1.json') + testOutput2 = os.path.join(testInputs, 'test1_run2.json') + with open(testOutput1, 'r') as f: + json1 = json.load(f) + with open(testOutput2, 'r') as f: + json2 = json.load(f) + return json1, json2 + + def test_basic(self): + expect_lines = [ + ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'], + ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'], + ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'], + ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'], + ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'], + ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'], + ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'], + ['BM_100xSlower', '+99.0000', '+99.0000', + '100', '10000', '100', '10000'], + ['BM_100xFaster', '-0.9900', '-0.9900', + '10000', '100', '10000', '100'], + ['BM_10PercentCPUToTime', '+0.1000', + '-0.1000', '100', '110', '100', '90'], + ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'], + ['BM_NotBadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'], + ] + json1, json2 = self.load_results() + output_lines_with_header = generate_difference_report( + json1, json2, use_color=False) + output_lines = output_lines_with_header[2:] + print("\n") + print("\n".join(output_lines_with_header)) + self.assertEqual(len(output_lines), len(expect_lines)) + for i in range(0, len(output_lines)): + parts = [x for x in output_lines[i].split(' ') if x] + self.assertEqual(len(parts), 7) + self.assertEqual(expect_lines[i], parts) + + +class TestReportDifferenceBetweenFamilies(unittest.TestCase): + def load_result(self): + import json + testInputs = os.path.join( + os.path.dirname( + os.path.realpath(__file__)), + 'Inputs') + testOutput = os.path.join(testInputs, 'test2_run.json') + with open(testOutput, 'r') as f: + json = json.load(f) + return json + + def test_basic(self): + expect_lines = [ + ['.', '-0.5000', '-0.5000', '10', '5', '10', '5'], + ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'], + ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'], + ['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'], + ] + json = self.load_result() + json1 = filter_benchmark(json, "BM_Z.ro", ".") + json2 = filter_benchmark(json, 
"BM_O.e", ".") + output_lines_with_header = generate_difference_report( + json1, json2, use_color=False) + output_lines = output_lines_with_header[2:] + print("\n") + print("\n".join(output_lines_with_header)) + self.assertEqual(len(output_lines), len(expect_lines)) + for i in range(0, len(output_lines)): + parts = [x for x in output_lines[i].split(' ') if x] + self.assertEqual(len(parts), 7) + self.assertEqual(expect_lines[i], parts) + + +class TestReportDifferenceWithUTest(unittest.TestCase): + def load_results(self): + import json + testInputs = os.path.join( + os.path.dirname( + os.path.realpath(__file__)), + 'Inputs') + testOutput1 = os.path.join(testInputs, 'test3_run0.json') + testOutput2 = os.path.join(testInputs, 'test3_run1.json') + with open(testOutput1, 'r') as f: + json1 = json.load(f) + with open(testOutput2, 'r') as f: + json2 = json.load(f) + return json1, json2 + + def test_utest(self): + expect_lines = [] + expect_lines = [ + ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'], + ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'], + ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'], + ['BM_Two_pvalue', + '0.6985', + '0.6985', + 'U', + 'Test,', + 'Repetitions:', + '2', + 'vs', + '2.', + 'WARNING:', + 'Results', + 'unreliable!', + '9+', + 'repetitions', + 'recommended.'], + ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'], + ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'], + ['short_pvalue', + '0.7671', + '0.1489', + 'U', + 'Test,', + 'Repetitions:', + '2', + 'vs', + '3.', + 'WARNING:', + 'Results', + 'unreliable!', + '9+', + 'repetitions', + 'recommended.'], + ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'], + ] + json1, json2 = self.load_results() + output_lines_with_header = generate_difference_report( + json1, json2, utest=True, utest_alpha=0.05, use_color=False) + output_lines = output_lines_with_header[2:] + print("\n") + print("\n".join(output_lines_with_header)) + self.assertEqual(len(output_lines), len(expect_lines)) + for i in range(0, len(output_lines)): + parts = [x for x in output_lines[i].split(' ') if x] + self.assertEqual(expect_lines[i], parts) + + +class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly( + unittest.TestCase): + def load_results(self): + import json + testInputs = os.path.join( + os.path.dirname( + os.path.realpath(__file__)), + 'Inputs') + testOutput1 = os.path.join(testInputs, 'test3_run0.json') + testOutput2 = os.path.join(testInputs, 'test3_run1.json') + with open(testOutput1, 'r') as f: + json1 = json.load(f) + with open(testOutput2, 'r') as f: + json2 = json.load(f) + return json1, json2 + + def test_utest(self): + expect_lines = [] + expect_lines = [ + ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'], + ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'], + ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'], + ['BM_Two_pvalue', + '0.6985', + '0.6985', + 'U', + 'Test,', + 'Repetitions:', + '2', + 'vs', + '2.', + 'WARNING:', + 'Results', + 'unreliable!', + '9+', + 'repetitions', + 'recommended.'], + ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'], + ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'], + ['short_pvalue', + '0.7671', + '0.1489', + 'U', + 'Test,', + 'Repetitions:', + '2', + 'vs', + '3.', + 'WARNING:', + 'Results', + 'unreliable!', + '9+', + 'repetitions', + 'recommended.'], + ] + json1, json2 = self.load_results() + output_lines_with_header = generate_difference_report( + json1, json2, display_aggregates_only=True, + utest=True, utest_alpha=0.05, 
use_color=False) + output_lines = output_lines_with_header[2:] + print("\n") + print("\n".join(output_lines_with_header)) + self.assertEqual(len(output_lines), len(expect_lines)) + for i in range(0, len(output_lines)): + parts = [x for x in output_lines[i].split(' ') if x] + self.assertEqual(expect_lines[i], parts) + + +if __name__ == '__main__': + unittest.main() + +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 +# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; +# kate: indent-mode python; remove-trailing-spaces modified; diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/util.py b/benchmarks/thirdparty/benchmark/tools/gbench/util.py new file mode 100755 index 0000000000..661c4bad8d --- /dev/null +++ b/benchmarks/thirdparty/benchmark/tools/gbench/util.py @@ -0,0 +1,163 @@ +"""util.py - General utilities for running, loading, and processing benchmarks """ +import json +import os +import tempfile +import subprocess +import sys + +# Input file type enumeration +IT_Invalid = 0 +IT_JSON = 1 +IT_Executable = 2 + +_num_magic_bytes = 2 if sys.platform.startswith('win') else 4 + + +def is_executable_file(filename): + """ + Return 'True' if 'filename' names a valid file which is likely + an executable. A file is considered an executable if it starts with the + magic bytes for an EXE, Mach O, or ELF file. + """ + if not os.path.isfile(filename): + return False + with open(filename, mode='rb') as f: + magic_bytes = f.read(_num_magic_bytes) + if sys.platform == 'darwin': + return magic_bytes in [ + b'\xfe\xed\xfa\xce', # MH_MAGIC + b'\xce\xfa\xed\xfe', # MH_CIGAM + b'\xfe\xed\xfa\xcf', # MH_MAGIC_64 + b'\xcf\xfa\xed\xfe', # MH_CIGAM_64 + b'\xca\xfe\xba\xbe', # FAT_MAGIC + b'\xbe\xba\xfe\xca' # FAT_CIGAM + ] + elif sys.platform.startswith('win'): + return magic_bytes == b'MZ' + else: + return magic_bytes == b'\x7FELF' + + +def is_json_file(filename): + """ + Returns 'True' if 'filename' names a valid JSON output file. + 'False' otherwise. + """ + try: + with open(filename, 'r') as f: + json.load(f) + return True + except BaseException: + pass + return False + + +def classify_input_file(filename): + """ + Return a tuple (type, msg) where 'type' specifies the classified type + of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable + string representing the error. + """ + ftype = IT_Invalid + err_msg = None + if not os.path.exists(filename): + err_msg = "'%s' does not exist" % filename + elif not os.path.isfile(filename): + err_msg = "'%s' does not name a file" % filename + elif is_executable_file(filename): + ftype = IT_Executable + elif is_json_file(filename): + ftype = IT_JSON + else: + err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename + return ftype, err_msg + + +def check_input_file(filename): + """ + Classify the file named by 'filename' and return the classification. + If the file is classified as 'IT_Invalid' print an error message and exit + the program. + """ + ftype, msg = classify_input_file(filename) + if ftype == IT_Invalid: + print("Invalid input file: %s" % msg) + sys.exit(1) + return ftype + + +def find_benchmark_flag(prefix, benchmark_flags): + """ + Search the specified list of flags for a flag matching `<prefix>` and + if it is found return the arg it specifies. If specified more than once the + last value is returned. If the flag is not found None is returned.
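+    Illustrative example (an added sketch, not from the original file; the
+    flag value is made up):
+    find_benchmark_flag('--benchmark_out=', ['--benchmark_out=run.json'])
+    returns 'run.json'.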
+ """ + assert prefix.startswith('--') and prefix.endswith('=') + result = None + for f in benchmark_flags: + if f.startswith(prefix): + result = f[len(prefix):] + return result + + +def remove_benchmark_flags(prefix, benchmark_flags): + """ + Return a new list containing the specified benchmark_flags except those + with the specified prefix. + """ + assert prefix.startswith('--') and prefix.endswith('=') + return [f for f in benchmark_flags if not f.startswith(prefix)] + + +def load_benchmark_results(fname): + """ + Read benchmark output from a file and return the JSON object. + REQUIRES: 'fname' names a file containing JSON benchmark output. + """ + with open(fname, 'r') as f: + return json.load(f) + + +def run_benchmark(exe_name, benchmark_flags): + """ + Run a benchmark specified by 'exe_name' with the specified + 'benchmark_flags'. The benchmark is run directly as a subprocess to preserve + real time console output. + RETURNS: A JSON object representing the benchmark output + """ + output_name = find_benchmark_flag('--benchmark_out=', + benchmark_flags) + is_temp_output = False + if output_name is None: + is_temp_output = True + thandle, output_name = tempfile.mkstemp() + os.close(thandle) + benchmark_flags = list(benchmark_flags) + \ + ['--benchmark_out=%s' % output_name] + + cmd = [exe_name] + benchmark_flags + print("RUNNING: %s" % ' '.join(cmd)) + exitCode = subprocess.call(cmd) + if exitCode != 0: + print('TEST FAILED...') + sys.exit(exitCode) + json_res = load_benchmark_results(output_name) + if is_temp_output: + os.unlink(output_name) + return json_res + + +def run_or_load_benchmark(filename, benchmark_flags): + """ + Get the results for a specified benchmark. If 'filename' specifies + an executable benchmark then the results are generated by running the + benchmark. Otherwise 'filename' must name a valid JSON output file, + which is loaded and the result returned. + """ + ftype = check_input_file(filename) + if ftype == IT_JSON: + return load_benchmark_results(filename) + if ftype == IT_Executable: + return run_benchmark(filename, benchmark_flags) + raise ValueError('Unknown file type %s' % ftype) diff --git a/benchmarks/thirdparty/benchmark/tools/requirements.txt b/benchmarks/thirdparty/benchmark/tools/requirements.txt new file mode 100755 index 0000000000..3b3331b5af --- /dev/null +++ b/benchmarks/thirdparty/benchmark/tools/requirements.txt @@ -0,0 +1 @@ +scipy>=1.5.0 \ No newline at end of file diff --git a/benchmarks/thirdparty/benchmark/tools/strip_asm.py b/benchmarks/thirdparty/benchmark/tools/strip_asm.py new file mode 100755 index 0000000000..9030550b43 --- /dev/null +++ b/benchmarks/thirdparty/benchmark/tools/strip_asm.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python + +""" +strip_asm.py - Cleanup ASM output for the specified file +""" + +from argparse import ArgumentParser +import sys +import os +import re + +def find_used_labels(asm): + found = set() + label_re = re.compile("\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)") + for l in asm.splitlines(): + m = label_re.match(l) + if m: + found.add('.L%s' % m.group(1)) + return found + + +def normalize_labels(asm): + decls = set() + label_decl = re.compile("^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)") + for l in asm.splitlines(): + m = label_decl.match(l) + if m: + decls.add(m.group(0)) + if len(decls) == 0: + return asm + needs_dot = next(iter(decls))[0] != '.' + if not needs_dot: + return asm + for ld in decls: + asm = re.sub("(^|\s+)" + ld + "(?=:|\s)", '\\1.' 
+ ld, asm) + return asm + + +def transform_labels(asm): + asm = normalize_labels(asm) + used_decls = find_used_labels(asm) + new_asm = '' + label_decl = re.compile("^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)") + for l in asm.splitlines(): + m = label_decl.match(l) + if not m or m.group(0) in used_decls: + new_asm += l + new_asm += '\n' + return new_asm + + +def is_identifier(tk): + if len(tk) == 0: + return False + first = tk[0] + if not first.isalpha() and first != '_': + return False + for i in range(1, len(tk)): + c = tk[i] + if not c.isalnum() and c != '_': + return False + return True + +def process_identifiers(l): + """ + process_identifiers - process all identifiers and modify them to have + consistent names across all platforms; specifically across ELF and MachO. + For example, MachO inserts an additional understore at the beginning of + names. This function removes that. + """ + parts = re.split(r'([a-zA-Z0-9_]+)', l) + new_line = '' + for tk in parts: + if is_identifier(tk): + if tk.startswith('__Z'): + tk = tk[1:] + elif tk.startswith('_') and len(tk) > 1 and \ + tk[1].isalpha() and tk[1] != 'Z': + tk = tk[1:] + new_line += tk + return new_line + + +def process_asm(asm): + """ + Strip the ASM of unwanted directives and lines + """ + new_contents = '' + asm = transform_labels(asm) + + # TODO: Add more things we want to remove + discard_regexes = [ + re.compile("\s+\..*$"), # directive + re.compile("\s*#(NO_APP|APP)$"), #inline ASM + re.compile("\s*#.*$"), # comment line + re.compile("\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"), #global directive + re.compile("\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"), + ] + keep_regexes = [ + + ] + fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:") + for l in asm.splitlines(): + # Remove Mach-O attribute + l = l.replace('@GOTPCREL', '') + add_line = True + for reg in discard_regexes: + if reg.match(l) is not None: + add_line = False + break + for reg in keep_regexes: + if reg.match(l) is not None: + add_line = True + break + if add_line: + if fn_label_def.match(l) and len(new_contents) != 0: + new_contents += '\n' + l = process_identifiers(l) + new_contents += l + new_contents += '\n' + return new_contents + +def main(): + parser = ArgumentParser( + description='generate a stripped assembly file') + parser.add_argument( + 'input', metavar='input', type=str, nargs=1, + help='An input assembly file') + parser.add_argument( + 'out', metavar='output', type=str, nargs=1, + help='The output file') + args, unknown_args = parser.parse_known_args() + input = args.input[0] + output = args.out[0] + if not os.path.isfile(input): + print(("ERROR: input file '%s' does not exist") % input) + sys.exit(1) + contents = None + with open(input, 'r') as f: + contents = f.read() + new_contents = process_asm(contents) + with open(output, 'w') as f: + f.write(new_contents) + + +if __name__ == '__main__': + main() + +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 +# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; +# kate: indent-mode python; remove-trailing-spaces modified; From d7d34df789e13f2a6da26568f4a4dbfa1e6a8e70 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Sat, 23 Jan 2021 14:10:12 +0100 Subject: [PATCH 028/113] :fire: use fetchcontent --- benchmarks/CMakeLists.txt | 23 +- benchmarks/thirdparty/benchmark/.clang-format | 5 - .../.github/ISSUE_TEMPLATE/bug_report.md | 32 - .../.github/ISSUE_TEMPLATE/feature_request.md | 20 - .../.github/workflows/build-and-test.yml | 38 - 
.../benchmark/.github/workflows/pylint.yml | 26 - .../.github/workflows/test_bindings.yml | 24 - benchmarks/thirdparty/benchmark/.gitignore | 66 - .../benchmark/.travis-libcxx-setup.sh | 28 - benchmarks/thirdparty/benchmark/.travis.yml | 231 --- .../thirdparty/benchmark/.ycm_extra_conf.py | 115 -- benchmarks/thirdparty/benchmark/AUTHORS | 57 - benchmarks/thirdparty/benchmark/BUILD.bazel | 44 - .../thirdparty/benchmark/CMakeLists.txt | 286 --- .../thirdparty/benchmark/CONTRIBUTING.md | 58 - benchmarks/thirdparty/benchmark/CONTRIBUTORS | 79 - benchmarks/thirdparty/benchmark/LICENSE | 202 --- benchmarks/thirdparty/benchmark/README.md | 1319 -------------- benchmarks/thirdparty/benchmark/WORKSPACE | 36 - benchmarks/thirdparty/benchmark/_config.yml | 1 - benchmarks/thirdparty/benchmark/appveyor.yml | 50 - .../benchmark/bindings/python/build_defs.bzl | 25 - .../python/google_benchmark/__init__.py | 156 -- .../python/google_benchmark/benchmark.cc | 180 -- .../python/google_benchmark/example.py | 136 -- .../benchmark/bindings/python/pybind11.BUILD | 20 - .../bindings/python/python_headers.BUILD | 6 - .../bindings/python/requirements.txt | 2 - .../benchmark/cmake/AddCXXCompilerFlag.cmake | 74 - .../benchmark/cmake/CXXFeatureCheck.cmake | 69 - .../benchmark/cmake/Config.cmake.in | 1 - .../benchmark/cmake/GetGitVersion.cmake | 54 - .../benchmark/cmake/GoogleTest.cmake | 41 - .../benchmark/cmake/GoogleTest.cmake.in | 58 - .../benchmark/cmake/benchmark.pc.in | 12 - .../benchmark/cmake/gnu_posix_regex.cpp | 12 - .../benchmark/cmake/llvm-toolchain.cmake | 8 - .../benchmark/cmake/posix_regex.cpp | 14 - .../benchmark/cmake/split_list.cmake | 3 - .../thirdparty/benchmark/cmake/std_regex.cpp | 10 - .../benchmark/cmake/steady_clock.cpp | 7 - .../cmake/thread_safety_attributes.cpp | 4 - .../thirdparty/benchmark/conan/CMakeLists.txt | 7 - .../conan/test_package/CMakeLists.txt | 10 - .../benchmark/conan/test_package/conanfile.py | 19 - .../conan/test_package/test_package.cpp | 18 - benchmarks/thirdparty/benchmark/conanfile.py | 79 - .../thirdparty/benchmark/dependencies.md | 18 - .../benchmark/docs/AssemblyTests.md | 147 -- .../thirdparty/benchmark/docs/_config.yml | 1 - .../thirdparty/benchmark/docs/releasing.md | 16 - benchmarks/thirdparty/benchmark/docs/tools.md | 203 --- .../benchmark/include/benchmark/benchmark.h | 1601 ----------------- benchmarks/thirdparty/benchmark/setup.py | 140 -- .../thirdparty/benchmark/src/CMakeLists.txt | 114 -- .../thirdparty/benchmark/src/arraysize.h | 33 - .../thirdparty/benchmark/src/benchmark.cc | 499 ----- .../benchmark/src/benchmark_api_internal.cc | 15 - .../benchmark/src/benchmark_api_internal.h | 53 - .../benchmark/src/benchmark_main.cc | 17 - .../benchmark/src/benchmark_name.cc | 58 - .../benchmark/src/benchmark_register.cc | 515 ------ .../benchmark/src/benchmark_register.h | 107 -- .../benchmark/src/benchmark_runner.cc | 362 ---- .../benchmark/src/benchmark_runner.h | 51 - benchmarks/thirdparty/benchmark/src/check.h | 82 - .../thirdparty/benchmark/src/colorprint.cc | 188 -- .../thirdparty/benchmark/src/colorprint.h | 33 - .../benchmark/src/commandlineflags.cc | 228 --- .../benchmark/src/commandlineflags.h | 103 -- .../thirdparty/benchmark/src/complexity.cc | 238 --- .../thirdparty/benchmark/src/complexity.h | 55 - .../benchmark/src/console_reporter.cc | 177 -- .../thirdparty/benchmark/src/counter.cc | 80 - benchmarks/thirdparty/benchmark/src/counter.h | 32 - .../thirdparty/benchmark/src/csv_reporter.cc | 154 -- .../thirdparty/benchmark/src/cycleclock.h | 206 --- 
.../benchmark/src/internal_macros.h | 94 - .../thirdparty/benchmark/src/json_reporter.cc | 255 --- benchmarks/thirdparty/benchmark/src/log.h | 74 - benchmarks/thirdparty/benchmark/src/mutex.h | 155 -- benchmarks/thirdparty/benchmark/src/re.h | 158 -- .../thirdparty/benchmark/src/reporter.cc | 105 -- benchmarks/thirdparty/benchmark/src/sleep.cc | 51 - benchmarks/thirdparty/benchmark/src/sleep.h | 15 - .../thirdparty/benchmark/src/statistics.cc | 193 -- .../thirdparty/benchmark/src/statistics.h | 37 - .../thirdparty/benchmark/src/string_util.cc | 255 --- .../thirdparty/benchmark/src/string_util.h | 59 - .../thirdparty/benchmark/src/sysinfo.cc | 712 -------- .../thirdparty/benchmark/src/thread_manager.h | 64 - .../thirdparty/benchmark/src/thread_timer.h | 86 - benchmarks/thirdparty/benchmark/src/timers.cc | 244 --- benchmarks/thirdparty/benchmark/src/timers.h | 48 - .../benchmark/test/AssemblyTests.cmake | 46 - .../thirdparty/benchmark/test/CMakeLists.txt | 263 --- .../benchmark/test/args_product_test.cc | 77 - .../thirdparty/benchmark/test/basic_test.cc | 136 -- .../benchmark/test/benchmark_gtest.cc | 128 -- .../benchmark/test/benchmark_name_gtest.cc | 74 - .../benchmark/test/benchmark_test.cc | 245 --- .../test/clobber_memory_assembly_test.cc | 64 - .../benchmark/test/commandlineflags_gtest.cc | 201 --- .../benchmark/test/complexity_test.cc | 213 --- .../thirdparty/benchmark/test/cxx03_test.cc | 63 - .../benchmark/test/diagnostics_test.cc | 80 - .../test/display_aggregates_only_test.cc | 43 - .../test/donotoptimize_assembly_test.cc | 163 -- .../benchmark/test/donotoptimize_test.cc | 52 - .../thirdparty/benchmark/test/filter_test.cc | 104 -- .../thirdparty/benchmark/test/fixture_test.cc | 49 - .../benchmark/test/internal_threading_test.cc | 184 -- .../benchmark/test/link_main_test.cc | 8 - .../thirdparty/benchmark/test/map_test.cc | 57 - .../benchmark/test/memory_manager_test.cc | 44 - .../benchmark/test/multiple_ranges_test.cc | 96 - .../thirdparty/benchmark/test/options_test.cc | 75 - .../thirdparty/benchmark/test/output_test.h | 213 --- .../benchmark/test/output_test_helper.cc | 515 ------ .../benchmark/test/register_benchmark_test.cc | 184 -- .../test/report_aggregates_only_test.cc | 39 - .../benchmark/test/reporter_output_test.cc | 747 -------- .../benchmark/test/skip_with_error_test.cc | 195 -- .../benchmark/test/state_assembly_test.cc | 68 - .../benchmark/test/statistics_gtest.cc | 28 - .../benchmark/test/string_util_gtest.cc | 153 -- .../benchmark/test/templated_fixture_test.cc | 28 - .../test/user_counters_tabular_test.cc | 285 --- .../benchmark/test/user_counters_test.cc | 531 ------ .../test/user_counters_thousands_test.cc | 173 -- .../thirdparty/benchmark/tools/compare.py | 416 ----- .../tools/gbench/Inputs/test1_run1.json | 119 -- .../tools/gbench/Inputs/test1_run2.json | 119 -- .../tools/gbench/Inputs/test2_run.json | 81 - .../tools/gbench/Inputs/test3_run0.json | 65 - .../tools/gbench/Inputs/test3_run1.json | 65 - .../benchmark/tools/gbench/__init__.py | 8 - .../benchmark/tools/gbench/report.py | 541 ------ .../thirdparty/benchmark/tools/gbench/util.py | 163 -- .../benchmark/tools/requirements.txt | 1 - .../thirdparty/benchmark/tools/strip_asm.py | 151 -- 141 files changed, 15 insertions(+), 19234 deletions(-) delete mode 100755 benchmarks/thirdparty/benchmark/.clang-format delete mode 100755 benchmarks/thirdparty/benchmark/.github/ISSUE_TEMPLATE/bug_report.md delete mode 100755 benchmarks/thirdparty/benchmark/.github/ISSUE_TEMPLATE/feature_request.md delete mode 100755 
benchmarks/thirdparty/benchmark/.github/workflows/build-and-test.yml delete mode 100755 benchmarks/thirdparty/benchmark/.github/workflows/pylint.yml delete mode 100755 benchmarks/thirdparty/benchmark/.github/workflows/test_bindings.yml delete mode 100755 benchmarks/thirdparty/benchmark/.gitignore delete mode 100755 benchmarks/thirdparty/benchmark/.travis-libcxx-setup.sh delete mode 100755 benchmarks/thirdparty/benchmark/.travis.yml delete mode 100755 benchmarks/thirdparty/benchmark/.ycm_extra_conf.py delete mode 100755 benchmarks/thirdparty/benchmark/AUTHORS delete mode 100755 benchmarks/thirdparty/benchmark/BUILD.bazel delete mode 100755 benchmarks/thirdparty/benchmark/CMakeLists.txt delete mode 100755 benchmarks/thirdparty/benchmark/CONTRIBUTING.md delete mode 100755 benchmarks/thirdparty/benchmark/CONTRIBUTORS delete mode 100755 benchmarks/thirdparty/benchmark/LICENSE delete mode 100755 benchmarks/thirdparty/benchmark/README.md delete mode 100755 benchmarks/thirdparty/benchmark/WORKSPACE delete mode 100755 benchmarks/thirdparty/benchmark/_config.yml delete mode 100755 benchmarks/thirdparty/benchmark/appveyor.yml delete mode 100755 benchmarks/thirdparty/benchmark/bindings/python/build_defs.bzl delete mode 100755 benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/__init__.py delete mode 100755 benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/benchmark.cc delete mode 100755 benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/example.py delete mode 100755 benchmarks/thirdparty/benchmark/bindings/python/pybind11.BUILD delete mode 100755 benchmarks/thirdparty/benchmark/bindings/python/python_headers.BUILD delete mode 100755 benchmarks/thirdparty/benchmark/bindings/python/requirements.txt delete mode 100755 benchmarks/thirdparty/benchmark/cmake/AddCXXCompilerFlag.cmake delete mode 100755 benchmarks/thirdparty/benchmark/cmake/CXXFeatureCheck.cmake delete mode 100755 benchmarks/thirdparty/benchmark/cmake/Config.cmake.in delete mode 100755 benchmarks/thirdparty/benchmark/cmake/GetGitVersion.cmake delete mode 100755 benchmarks/thirdparty/benchmark/cmake/GoogleTest.cmake delete mode 100755 benchmarks/thirdparty/benchmark/cmake/GoogleTest.cmake.in delete mode 100755 benchmarks/thirdparty/benchmark/cmake/benchmark.pc.in delete mode 100755 benchmarks/thirdparty/benchmark/cmake/gnu_posix_regex.cpp delete mode 100755 benchmarks/thirdparty/benchmark/cmake/llvm-toolchain.cmake delete mode 100755 benchmarks/thirdparty/benchmark/cmake/posix_regex.cpp delete mode 100755 benchmarks/thirdparty/benchmark/cmake/split_list.cmake delete mode 100755 benchmarks/thirdparty/benchmark/cmake/std_regex.cpp delete mode 100755 benchmarks/thirdparty/benchmark/cmake/steady_clock.cpp delete mode 100755 benchmarks/thirdparty/benchmark/cmake/thread_safety_attributes.cpp delete mode 100755 benchmarks/thirdparty/benchmark/conan/CMakeLists.txt delete mode 100755 benchmarks/thirdparty/benchmark/conan/test_package/CMakeLists.txt delete mode 100755 benchmarks/thirdparty/benchmark/conan/test_package/conanfile.py delete mode 100755 benchmarks/thirdparty/benchmark/conan/test_package/test_package.cpp delete mode 100755 benchmarks/thirdparty/benchmark/conanfile.py delete mode 100755 benchmarks/thirdparty/benchmark/dependencies.md delete mode 100755 benchmarks/thirdparty/benchmark/docs/AssemblyTests.md delete mode 100755 benchmarks/thirdparty/benchmark/docs/_config.yml delete mode 100755 benchmarks/thirdparty/benchmark/docs/releasing.md delete mode 100755 
benchmarks/thirdparty/benchmark/docs/tools.md delete mode 100755 benchmarks/thirdparty/benchmark/include/benchmark/benchmark.h delete mode 100755 benchmarks/thirdparty/benchmark/setup.py delete mode 100755 benchmarks/thirdparty/benchmark/src/CMakeLists.txt delete mode 100755 benchmarks/thirdparty/benchmark/src/arraysize.h delete mode 100755 benchmarks/thirdparty/benchmark/src/benchmark.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_api_internal.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_api_internal.h delete mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_main.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_name.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_register.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_register.h delete mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_runner.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/benchmark_runner.h delete mode 100755 benchmarks/thirdparty/benchmark/src/check.h delete mode 100755 benchmarks/thirdparty/benchmark/src/colorprint.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/colorprint.h delete mode 100755 benchmarks/thirdparty/benchmark/src/commandlineflags.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/commandlineflags.h delete mode 100755 benchmarks/thirdparty/benchmark/src/complexity.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/complexity.h delete mode 100755 benchmarks/thirdparty/benchmark/src/console_reporter.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/counter.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/counter.h delete mode 100755 benchmarks/thirdparty/benchmark/src/csv_reporter.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/cycleclock.h delete mode 100755 benchmarks/thirdparty/benchmark/src/internal_macros.h delete mode 100755 benchmarks/thirdparty/benchmark/src/json_reporter.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/log.h delete mode 100755 benchmarks/thirdparty/benchmark/src/mutex.h delete mode 100755 benchmarks/thirdparty/benchmark/src/re.h delete mode 100755 benchmarks/thirdparty/benchmark/src/reporter.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/sleep.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/sleep.h delete mode 100755 benchmarks/thirdparty/benchmark/src/statistics.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/statistics.h delete mode 100755 benchmarks/thirdparty/benchmark/src/string_util.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/string_util.h delete mode 100755 benchmarks/thirdparty/benchmark/src/sysinfo.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/thread_manager.h delete mode 100755 benchmarks/thirdparty/benchmark/src/thread_timer.h delete mode 100755 benchmarks/thirdparty/benchmark/src/timers.cc delete mode 100755 benchmarks/thirdparty/benchmark/src/timers.h delete mode 100755 benchmarks/thirdparty/benchmark/test/AssemblyTests.cmake delete mode 100755 benchmarks/thirdparty/benchmark/test/CMakeLists.txt delete mode 100755 benchmarks/thirdparty/benchmark/test/args_product_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/basic_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/benchmark_gtest.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/benchmark_name_gtest.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/benchmark_test.cc delete mode 100755 
benchmarks/thirdparty/benchmark/test/clobber_memory_assembly_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/commandlineflags_gtest.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/complexity_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/cxx03_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/diagnostics_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/display_aggregates_only_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/donotoptimize_assembly_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/donotoptimize_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/filter_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/fixture_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/internal_threading_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/link_main_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/map_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/memory_manager_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/multiple_ranges_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/options_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/output_test.h delete mode 100755 benchmarks/thirdparty/benchmark/test/output_test_helper.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/register_benchmark_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/report_aggregates_only_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/reporter_output_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/skip_with_error_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/state_assembly_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/statistics_gtest.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/string_util_gtest.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/templated_fixture_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/user_counters_tabular_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/user_counters_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/test/user_counters_thousands_test.cc delete mode 100755 benchmarks/thirdparty/benchmark/tools/compare.py delete mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run1.json delete mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run2.json delete mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test2_run.json delete mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test3_run0.json delete mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test3_run1.json delete mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/__init__.py delete mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/report.py delete mode 100755 benchmarks/thirdparty/benchmark/tools/gbench/util.py delete mode 100755 benchmarks/thirdparty/benchmark/tools/requirements.txt delete mode 100755 benchmarks/thirdparty/benchmark/tools/strip_asm.py diff --git a/benchmarks/CMakeLists.txt b/benchmarks/CMakeLists.txt index 86063dbada..ee4db1912d 100644 --- a/benchmarks/CMakeLists.txt +++ b/benchmarks/CMakeLists.txt @@ -1,18 +1,25 @@ -cmake_minimum_required(VERSION 3.8) +cmake_minimum_required(VERSION 3.11) project(JSON_Benchmarks LANGUAGES CXX) # set compiler flags if((CMAKE_CXX_COMPILER_ID MATCHES GNU) OR (CMAKE_CXX_COMPILER_ID MATCHES Clang)) - 
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -flto -DNDEBUG -O3") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -flto -DNDEBUG -O3") endif() # configure Google Benchmarks -set(BENCHMARK_ENABLE_TESTING OFF CACHE INTERNAL "" FORCE) -add_subdirectory(thirdparty/benchmark) +include(FetchContent) +FetchContent_Declare( + benchmark + GIT_REPOSITORY https://github.com/google/benchmark.git + GIT_SHALLOW TRUE +) -# header directories -include_directories(thirdparty) -include_directories(${CMAKE_SOURCE_DIR}/../single_include) +FetchContent_GetProperties(benchmark) +if(NOT benchmark_POPULATED) + FetchContent_Populate(benchmark) + set(BENCHMARK_ENABLE_TESTING OFF CACHE INTERNAL "" FORCE) + add_subdirectory(${benchmark_SOURCE_DIR} ${benchmark_BINARY_DIR}) +endif() # download test data include(${CMAKE_SOURCE_DIR}/../cmake/download_test_data.cmake) @@ -22,4 +29,4 @@ add_executable(json_benchmarks src/benchmarks.cpp) target_compile_features(json_benchmarks PRIVATE cxx_std_11) target_link_libraries(json_benchmarks benchmark ${CMAKE_THREAD_LIBS_INIT}) add_dependencies(json_benchmarks download_test_data) -target_include_directories(json_benchmarks PRIVATE ${CMAKE_BINARY_DIR}/include) +target_include_directories(json_benchmarks PRIVATE ${CMAKE_SOURCE_DIR}/../single_include ${CMAKE_BINARY_DIR}/include) diff --git a/benchmarks/thirdparty/benchmark/.clang-format b/benchmarks/thirdparty/benchmark/.clang-format deleted file mode 100755 index e7d00feaa0..0000000000 --- a/benchmarks/thirdparty/benchmark/.clang-format +++ /dev/null @@ -1,5 +0,0 @@ ---- -Language: Cpp -BasedOnStyle: Google -PointerAlignment: Left -... diff --git a/benchmarks/thirdparty/benchmark/.github/ISSUE_TEMPLATE/bug_report.md b/benchmarks/thirdparty/benchmark/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100755 index 6c2ced9b2e..0000000000 --- a/benchmarks/thirdparty/benchmark/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: "[BUG]" -labels: '' -assignees: '' - ---- - -**Describe the bug** -A clear and concise description of what the bug is. - -**System** -Which OS, compiler, and compiler version are you using: - - OS: - - Compiler and version: - -**To reproduce** -Steps to reproduce the behavior: -1. sync to commit ... -2. cmake/bazel... -3. make ... -4. See error - -**Expected behavior** -A clear and concise description of what you expected to happen. - -**Screenshots** -If applicable, add screenshots to help explain your problem. - -**Additional context** -Add any other context about the problem here. diff --git a/benchmarks/thirdparty/benchmark/.github/ISSUE_TEMPLATE/feature_request.md b/benchmarks/thirdparty/benchmark/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100755 index 9e8ab6a673..0000000000 --- a/benchmarks/thirdparty/benchmark/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: "[FR]" -labels: '' -assignees: '' - ---- - -**Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] - -**Describe the solution you'd like** -A clear and concise description of what you want to happen. - -**Describe alternatives you've considered** -A clear and concise description of any alternative solutions or features you've considered. - -**Additional context** -Add any other context or screenshots about the feature request here. 
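For reference: the FetchContent_GetProperties/FetchContent_Populate sequence in the benchmarks/CMakeLists.txt hunk above is the form CMake 3.11 requires. On CMake 3.14 or newer the same setup can be expressed with a single FetchContent_MakeAvailable call; a minimal sketch under that assumption (not part of this patch):

include(FetchContent)
FetchContent_Declare(
    benchmark
    GIT_REPOSITORY https://github.com/google/benchmark.git
    GIT_SHALLOW TRUE
)
# Configure the dependency before making it available.
set(BENCHMARK_ENABLE_TESTING OFF CACHE INTERNAL "" FORCE)
FetchContent_MakeAvailable(benchmark)  # fetches and add_subdirectory()s the 'benchmark' target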
diff --git a/benchmarks/thirdparty/benchmark/.github/workflows/build-and-test.yml b/benchmarks/thirdparty/benchmark/.github/workflows/build-and-test.yml deleted file mode 100755 index f0f0626d74..0000000000 --- a/benchmarks/thirdparty/benchmark/.github/workflows/build-and-test.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: build-and-test - -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - -jobs: - job: - # TODO(dominic): Extend this to include compiler and set through env: CC/CXX. - name: ${{ matrix.os }}.${{ matrix.build_type }} - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: [ubuntu-latest, ubuntu-16.04, ubuntu-20.04, macos-latest, windows-latest] - build_type: ['Release', 'Debug'] - steps: - - uses: actions/checkout@v2 - - - name: create build environment - run: cmake -E make_directory ${{ runner.workspace }}/_build - - - name: configure cmake - shell: bash - working-directory: ${{ runner.workspace }}/_build - run: cmake -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} - - - name: build - shell: bash - working-directory: ${{ runner.workspace }}/_build - run: cmake --build . --config ${{ matrix.build_type }} - - - name: test - shell: bash - working-directory: ${{ runner.workspace }}/_build - run: ctest -C ${{ matrix.build_type }} diff --git a/benchmarks/thirdparty/benchmark/.github/workflows/pylint.yml b/benchmarks/thirdparty/benchmark/.github/workflows/pylint.yml deleted file mode 100755 index c8696749f3..0000000000 --- a/benchmarks/thirdparty/benchmark/.github/workflows/pylint.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: pylint - -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - -jobs: - pylint: - - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.8 - uses: actions/setup-python@v1 - with: - python-version: 3.8 - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install pylint pylint-exit conan - - name: Run pylint - run: | - pylint `find . -name '*.py'|xargs` || pylint-exit $? diff --git a/benchmarks/thirdparty/benchmark/.github/workflows/test_bindings.yml b/benchmarks/thirdparty/benchmark/.github/workflows/test_bindings.yml deleted file mode 100755 index 273d7f93ee..0000000000 --- a/benchmarks/thirdparty/benchmark/.github/workflows/test_bindings.yml +++ /dev/null @@ -1,24 +0,0 @@ -name: test-bindings - -on: - push: - branches: [master] - pull_request: - branches: [master] - -jobs: - python_bindings: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - name: Set up Python - uses: actions/setup-python@v1 - with: - python-version: 3.8 - - name: Install benchmark - run: - python setup.py install - - name: Run example bindings - run: - python bindings/python/google_benchmark/example.py diff --git a/benchmarks/thirdparty/benchmark/.gitignore b/benchmarks/thirdparty/benchmark/.gitignore deleted file mode 100755 index be55d774e2..0000000000 --- a/benchmarks/thirdparty/benchmark/.gitignore +++ /dev/null @@ -1,66 +0,0 @@ -*.a -*.so -*.so.?* -*.dll -*.exe -*.dylib -*.cmake -!/cmake/*.cmake -!/test/AssemblyTests.cmake -*~ -*.swp -*.pyc -__pycache__ - -# lcov -*.lcov -/lcov - -# cmake files. -/Testing -CMakeCache.txt -CMakeFiles/ -cmake_install.cmake - -# makefiles. -Makefile - -# in-source build. -bin/ -lib/ -/test/*_test - -# exuberant ctags. -tags - -# YouCompleteMe configuration. -.ycm_extra_conf.pyc - -# ninja generated files. 
-.ninja_deps -.ninja_log -build.ninja -install_manifest.txt -rules.ninja - -# bazel output symlinks. -bazel-* - -# out-of-source build top-level folders. -build/ -_build/ -build*/ - -# in-source dependencies -/googletest/ - -# Visual Studio 2015/2017 cache/options directory -.vs/ -CMakeSettings.json - -# Visual Studio Code cache/options directory -.vscode/ - -# Python build stuff -dist/ -*.egg-info* diff --git a/benchmarks/thirdparty/benchmark/.travis-libcxx-setup.sh b/benchmarks/thirdparty/benchmark/.travis-libcxx-setup.sh deleted file mode 100755 index a591743c6a..0000000000 --- a/benchmarks/thirdparty/benchmark/.travis-libcxx-setup.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -# Install a newer CMake version -curl -sSL https://cmake.org/files/v3.6/cmake-3.6.1-Linux-x86_64.sh -o install-cmake.sh -chmod +x install-cmake.sh -sudo ./install-cmake.sh --prefix=/usr/local --skip-license - -# Checkout LLVM sources -git clone --depth=1 https://github.com/llvm-mirror/llvm.git llvm-source -git clone --depth=1 https://github.com/llvm-mirror/libcxx.git llvm-source/projects/libcxx -git clone --depth=1 https://github.com/llvm-mirror/libcxxabi.git llvm-source/projects/libcxxabi - -# Setup libc++ options -if [ -z "$BUILD_32_BITS" ]; then - export BUILD_32_BITS=OFF && echo disabling 32 bit build -fi - -# Build and install libc++ (Use unstable ABI for better sanitizer coverage) -mkdir llvm-build && cd llvm-build -cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} \ - -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr \ - -DLIBCXX_ABI_UNSTABLE=ON \ - -DLLVM_USE_SANITIZER=${LIBCXX_SANITIZER} \ - -DLLVM_BUILD_32_BITS=${BUILD_32_BITS} \ - ../llvm-source -make cxx -j2 -sudo make install-cxxabi install-cxx -cd ../ diff --git a/benchmarks/thirdparty/benchmark/.travis.yml b/benchmarks/thirdparty/benchmark/.travis.yml deleted file mode 100755 index 36e343dbfe..0000000000 --- a/benchmarks/thirdparty/benchmark/.travis.yml +++ /dev/null @@ -1,231 +0,0 @@ -sudo: required -dist: trusty -language: cpp - -matrix: - include: - - compiler: gcc - addons: - apt: - packages: - - lcov - env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Coverage - - compiler: gcc - env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug - - compiler: gcc - env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release - - compiler: gcc - addons: - apt: - packages: - - g++-multilib - - libc6:i386 - env: - - COMPILER=g++ - - C_COMPILER=gcc - - BUILD_TYPE=Debug - - BUILD_32_BITS=ON - - EXTRA_FLAGS="-m32" - - compiler: gcc - addons: - apt: - packages: - - g++-multilib - - libc6:i386 - env: - - COMPILER=g++ - - C_COMPILER=gcc - - BUILD_TYPE=Release - - BUILD_32_BITS=ON - - EXTRA_FLAGS="-m32" - - compiler: gcc - env: - - INSTALL_GCC6_FROM_PPA=1 - - COMPILER=g++-6 C_COMPILER=gcc-6 BUILD_TYPE=Debug - - ENABLE_SANITIZER=1 - - EXTRA_FLAGS="-fno-omit-frame-pointer -g -O2 -fsanitize=undefined,address -fuse-ld=gold" - - compiler: clang - env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Debug - - compiler: clang - env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Release - # Clang w/ libc++ - - compiler: clang - dist: xenial - addons: - apt: - packages: - clang-3.8 - env: - - INSTALL_GCC6_FROM_PPA=1 - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug - - LIBCXX_BUILD=1 - - EXTRA_CXX_FLAGS="-stdlib=libc++" - - compiler: clang - dist: xenial - addons: - apt: - packages: - clang-3.8 - env: - - INSTALL_GCC6_FROM_PPA=1 - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release - - LIBCXX_BUILD=1 - - 
EXTRA_CXX_FLAGS="-stdlib=libc++" - # Clang w/ 32bit libc++ - - compiler: clang - dist: xenial - addons: - apt: - packages: - - clang-3.8 - - g++-multilib - - libc6:i386 - env: - - INSTALL_GCC6_FROM_PPA=1 - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug - - LIBCXX_BUILD=1 - - BUILD_32_BITS=ON - - EXTRA_FLAGS="-m32" - - EXTRA_CXX_FLAGS="-stdlib=libc++" - # Clang w/ 32bit libc++ - - compiler: clang - dist: xenial - addons: - apt: - packages: - - clang-3.8 - - g++-multilib - - libc6:i386 - env: - - INSTALL_GCC6_FROM_PPA=1 - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release - - LIBCXX_BUILD=1 - - BUILD_32_BITS=ON - - EXTRA_FLAGS="-m32" - - EXTRA_CXX_FLAGS="-stdlib=libc++" - # Clang w/ libc++, ASAN, UBSAN - - compiler: clang - dist: xenial - addons: - apt: - packages: - clang-3.8 - env: - - INSTALL_GCC6_FROM_PPA=1 - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug - - LIBCXX_BUILD=1 LIBCXX_SANITIZER="Undefined;Address" - - ENABLE_SANITIZER=1 - - EXTRA_FLAGS="-g -O2 -fno-omit-frame-pointer -fsanitize=undefined,address -fno-sanitize-recover=all" - - EXTRA_CXX_FLAGS="-stdlib=libc++" - - UBSAN_OPTIONS=print_stacktrace=1 - # Clang w/ libc++ and MSAN - - compiler: clang - dist: xenial - addons: - apt: - packages: - clang-3.8 - env: - - INSTALL_GCC6_FROM_PPA=1 - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug - - LIBCXX_BUILD=1 LIBCXX_SANITIZER=MemoryWithOrigins - - ENABLE_SANITIZER=1 - - EXTRA_FLAGS="-g -O2 -fno-omit-frame-pointer -fsanitize=memory -fsanitize-memory-track-origins" - - EXTRA_CXX_FLAGS="-stdlib=libc++" - # Clang w/ libc++ and MSAN - - compiler: clang - dist: xenial - addons: - apt: - packages: - clang-3.8 - env: - - INSTALL_GCC6_FROM_PPA=1 - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=RelWithDebInfo - - LIBCXX_BUILD=1 LIBCXX_SANITIZER=Thread - - ENABLE_SANITIZER=1 - - EXTRA_FLAGS="-g -O2 -fno-omit-frame-pointer -fsanitize=thread -fno-sanitize-recover=all" - - EXTRA_CXX_FLAGS="-stdlib=libc++" - - os: osx - osx_image: xcode8.3 - compiler: clang - env: - - COMPILER=clang++ BUILD_TYPE=Debug - - os: osx - osx_image: xcode8.3 - compiler: clang - env: - - COMPILER=clang++ BUILD_TYPE=Release - - os: osx - osx_image: xcode8.3 - compiler: clang - env: - - COMPILER=clang++ - - BUILD_TYPE=Release - - BUILD_32_BITS=ON - - EXTRA_FLAGS="-m32" - - os: osx - osx_image: xcode9.4 - compiler: gcc - env: - - COMPILER=g++-7 C_COMPILER=gcc-7 BUILD_TYPE=Debug - -before_script: - - if [ -n "${LIBCXX_BUILD}" ]; then - source .travis-libcxx-setup.sh; - fi - - if [ -n "${ENABLE_SANITIZER}" ]; then - export EXTRA_OPTIONS="-DBENCHMARK_ENABLE_ASSEMBLY_TESTS=OFF"; - else - export EXTRA_OPTIONS=""; - fi - - mkdir -p build && cd build - -before_install: - - if [ -z "$BUILD_32_BITS" ]; then - export BUILD_32_BITS=OFF && echo disabling 32 bit build; - fi - - if [ -n "${INSTALL_GCC6_FROM_PPA}" ]; then - sudo add-apt-repository -y "ppa:ubuntu-toolchain-r/test"; - sudo apt-get update --option Acquire::Retries=100 --option Acquire::http::Timeout="60"; - fi - -install: - - if [ -n "${INSTALL_GCC6_FROM_PPA}" ]; then - travis_wait sudo -E apt-get -yq --no-install-suggests --no-install-recommends install g++-6; - fi - - if [ "${TRAVIS_OS_NAME}" == "linux" -a "${BUILD_32_BITS}" == "OFF" ]; then - travis_wait sudo -E apt-get -y --no-install-suggests --no-install-recommends install llvm-3.9-tools; - sudo cp /usr/lib/llvm-3.9/bin/FileCheck /usr/local/bin/; - fi - - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then - PATH=~/.local/bin:${PATH}; - 
pip install --user --upgrade pip; - travis_wait pip install --user cpp-coveralls; - fi - - if [ "${C_COMPILER}" == "gcc-7" -a "${TRAVIS_OS_NAME}" == "osx" ]; then - rm -f /usr/local/include/c++; - brew update; - travis_wait brew install gcc@7; - fi - - if [ "${TRAVIS_OS_NAME}" == "linux" ]; then - sudo apt-get update -qq; - sudo apt-get install -qq unzip cmake3; - wget https://github.com/bazelbuild/bazel/releases/download/3.2.0/bazel-3.2.0-installer-linux-x86_64.sh --output-document bazel-installer.sh; - travis_wait sudo bash bazel-installer.sh; - fi - - if [ "${TRAVIS_OS_NAME}" == "osx" ]; then - curl -L -o bazel-installer.sh https://github.com/bazelbuild/bazel/releases/download/3.2.0/bazel-3.2.0-installer-darwin-x86_64.sh; - travis_wait sudo bash bazel-installer.sh; - fi - -script: - - cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_C_FLAGS="${EXTRA_FLAGS}" -DCMAKE_CXX_FLAGS="${EXTRA_FLAGS} ${EXTRA_CXX_FLAGS}" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON -DBENCHMARK_BUILD_32_BITS=${BUILD_32_BITS} ${EXTRA_OPTIONS} .. - - make - - ctest -C ${BUILD_TYPE} --output-on-failure - - bazel test -c dbg --define google_benchmark.have_regex=posix --announce_rc --verbose_failures --test_output=errors --keep_going //test/... - -after_success: - - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then - coveralls --include src --include include --gcov-options '\-lp' --root .. --build-root .; - fi diff --git a/benchmarks/thirdparty/benchmark/.ycm_extra_conf.py b/benchmarks/thirdparty/benchmark/.ycm_extra_conf.py deleted file mode 100755 index 5649ddcc74..0000000000 --- a/benchmarks/thirdparty/benchmark/.ycm_extra_conf.py +++ /dev/null @@ -1,115 +0,0 @@ -import os -import ycm_core - -# These are the compilation flags that will be used in case there's no -# compilation database set (by default, one is not set). -# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR. -flags = [ -'-Wall', -'-Werror', -'-pedantic-errors', -'-std=c++0x', -'-fno-strict-aliasing', -'-O3', -'-DNDEBUG', -# ...and the same thing goes for the magic -x option which specifies the -# language that the files to be compiled are written in. This is mostly -# relevant for c++ headers. -# For a C project, you would set this to 'c' instead of 'c++'. -'-x', 'c++', -'-I', 'include', -'-isystem', '/usr/include', -'-isystem', '/usr/local/include', -] - - -# Set this to the absolute path to the folder (NOT the file!) containing the -# compile_commands.json file to use that instead of 'flags'. See here for -# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html -# -# Most projects will NOT need to set this to anything; you can just change the -# 'flags' list of compilation flags. Notice that YCM itself uses that approach. 
-compilation_database_folder = '' - -if os.path.exists( compilation_database_folder ): - database = ycm_core.CompilationDatabase( compilation_database_folder ) -else: - database = None - -SOURCE_EXTENSIONS = [ '.cc' ] - -def DirectoryOfThisScript(): - return os.path.dirname( os.path.abspath( __file__ ) ) - - -def MakeRelativePathsInFlagsAbsolute( flags, working_directory ): - if not working_directory: - return list( flags ) - new_flags = [] - make_next_absolute = False - path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ] - for flag in flags: - new_flag = flag - - if make_next_absolute: - make_next_absolute = False - if not flag.startswith( '/' ): - new_flag = os.path.join( working_directory, flag ) - - for path_flag in path_flags: - if flag == path_flag: - make_next_absolute = True - break - - if flag.startswith( path_flag ): - path = flag[ len( path_flag ): ] - new_flag = path_flag + os.path.join( working_directory, path ) - break - - if new_flag: - new_flags.append( new_flag ) - return new_flags - - -def IsHeaderFile( filename ): - extension = os.path.splitext( filename )[ 1 ] - return extension in [ '.h', '.hxx', '.hpp', '.hh' ] - - -def GetCompilationInfoForFile( filename ): - # The compilation_commands.json file generated by CMake does not have entries - # for header files. So we do our best by asking the db for flags for a - # corresponding source file, if any. If one exists, the flags for that file - # should be good enough. - if IsHeaderFile( filename ): - basename = os.path.splitext( filename )[ 0 ] - for extension in SOURCE_EXTENSIONS: - replacement_file = basename + extension - if os.path.exists( replacement_file ): - compilation_info = database.GetCompilationInfoForFile( - replacement_file ) - if compilation_info.compiler_flags_: - return compilation_info - return None - return database.GetCompilationInfoForFile( filename ) - - -def FlagsForFile( filename, **kwargs ): - if database: - # Bear in mind that compilation_info.compiler_flags_ does NOT return a - # python list, but a "list-like" StringVec object - compilation_info = GetCompilationInfoForFile( filename ) - if not compilation_info: - return None - - final_flags = MakeRelativePathsInFlagsAbsolute( - compilation_info.compiler_flags_, - compilation_info.compiler_working_dir_ ) - else: - relative_to = DirectoryOfThisScript() - final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to ) - - return { - 'flags': final_flags, - 'do_cache': True - } diff --git a/benchmarks/thirdparty/benchmark/AUTHORS b/benchmarks/thirdparty/benchmark/AUTHORS deleted file mode 100755 index e353b53bf3..0000000000 --- a/benchmarks/thirdparty/benchmark/AUTHORS +++ /dev/null @@ -1,57 +0,0 @@ -# This is the official list of benchmark authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. -# -# Names should be added to this file as: -# Name or Organization -# The email address is not required for organizations. -# -# Please keep the list sorted. - -Albert Pretorius -Alex Steele -Andriy Berestovskyy -Arne Beer -Carto -Christian Wassermann -Christopher Seymour -Colin Braley -Daniel Harvey -David Coeurjolly -Deniz Evrenci -Dirac Research -Dominik Czarnota -Eric Backus -Eric Fiselier -Eugene Zhuk -Evgeny Safronov -Federico Ficarelli -Felix Homann -GergΕ‘ SzitΓ‘r -Google Inc. 
-International Business Machines Corporation -Ismael Jimenez Martinez -Jern-Kuan Leong -JianXiong Zhou -Joao Paulo Magalhaes -Jordan Williams -Jussi Knuuttila -Kaito Udagawa -Kishan Kumar -Lei Xu -Matt Clarkson -Maxim Vafin -MongoDB Inc. -Nick Hutchinson -Oleksandr Sochka -Ori Livneh -Paul Redmond -Radoslav Yovchev -Roman Lebedev -Sayan Bhattacharjee -Shuo Chen -Steinar H. Gunderson -Stripe, Inc. -Yixuan Qiu -Yusuke Suzuki -Zbigniew Skowron diff --git a/benchmarks/thirdparty/benchmark/BUILD.bazel b/benchmarks/thirdparty/benchmark/BUILD.bazel deleted file mode 100755 index eb35b62730..0000000000 --- a/benchmarks/thirdparty/benchmark/BUILD.bazel +++ /dev/null @@ -1,44 +0,0 @@ -load("@rules_cc//cc:defs.bzl", "cc_library") - -licenses(["notice"]) - -config_setting( - name = "windows", - values = { - "cpu": "x64_windows", - }, - visibility = [":__subpackages__"], -) - -cc_library( - name = "benchmark", - srcs = glob( - [ - "src/*.cc", - "src/*.h", - ], - exclude = ["src/benchmark_main.cc"], - ), - hdrs = ["include/benchmark/benchmark.h"], - linkopts = select({ - ":windows": ["-DEFAULTLIB:shlwapi.lib"], - "//conditions:default": ["-pthread"], - }), - strip_include_prefix = "include", - visibility = ["//visibility:public"], -) - -cc_library( - name = "benchmark_main", - srcs = ["src/benchmark_main.cc"], - hdrs = ["include/benchmark/benchmark.h"], - strip_include_prefix = "include", - visibility = ["//visibility:public"], - deps = [":benchmark"], -) - -cc_library( - name = "benchmark_internal_headers", - hdrs = glob(["src/*.h"]), - visibility = ["//test:__pkg__"], -) diff --git a/benchmarks/thirdparty/benchmark/CMakeLists.txt b/benchmarks/thirdparty/benchmark/CMakeLists.txt deleted file mode 100755 index a157666148..0000000000 --- a/benchmarks/thirdparty/benchmark/CMakeLists.txt +++ /dev/null @@ -1,286 +0,0 @@ -cmake_minimum_required (VERSION 3.5.1) - -foreach(p - CMP0048 # OK to clear PROJECT_VERSION on project() - CMP0054 # CMake 3.1 - CMP0056 # export EXE_LINKER_FLAGS to try_run - CMP0057 # Support no if() IN_LIST operator - CMP0063 # Honor visibility properties for all targets - CMP0077 # Allow option() overrides in importing projects - ) - if(POLICY ${p}) - cmake_policy(SET ${p} NEW) - endif() -endforeach() - -project (benchmark CXX) - -option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON) -option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON) -option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF) -option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF) -if(NOT MSVC) - option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF) -else() - set(BENCHMARK_BUILD_32_BITS OFF CACHE BOOL "Build a 32 bit version of the library - unsupported when using MSVC)" FORCE) -endif() -option(BENCHMARK_ENABLE_INSTALL "Enable installation of benchmark. (Projects embedding benchmark may want to turn this OFF.)" ON) - -# Allow unmet dependencies to be met using CMake's ExternalProject mechanics, which -# may require downloading the source code. -option(BENCHMARK_DOWNLOAD_DEPENDENCIES "Allow the downloading and in-tree building of unmet dependencies" OFF) - -# This option can be used to disable building and running unit tests which depend on gtest -# in cases where it is not possible to build or find a valid version of gtest. 
-option(BENCHMARK_ENABLE_GTEST_TESTS "Enable building the unit tests which depend on gtest" ON) - -set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON) -set(ENABLE_ASSEMBLY_TESTS_DEFAULT OFF) -function(should_enable_assembly_tests) - if(CMAKE_BUILD_TYPE) - string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER) - if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage") - # FIXME: The --coverage flag needs to be removed when building assembly - # tests for this to work. - return() - endif() - endif() - if (MSVC) - return() - elseif(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64") - return() - elseif(NOT CMAKE_SIZEOF_VOID_P EQUAL 8) - # FIXME: Make these work on 32 bit builds - return() - elseif(BENCHMARK_BUILD_32_BITS) - # FIXME: Make these work on 32 bit builds - return() - endif() - find_program(LLVM_FILECHECK_EXE FileCheck) - if (LLVM_FILECHECK_EXE) - set(LLVM_FILECHECK_EXE "${LLVM_FILECHECK_EXE}" CACHE PATH "llvm filecheck" FORCE) - message(STATUS "LLVM FileCheck Found: ${LLVM_FILECHECK_EXE}") - else() - message(STATUS "Failed to find LLVM FileCheck") - return() - endif() - set(ENABLE_ASSEMBLY_TESTS_DEFAULT ON PARENT_SCOPE) -endfunction() -should_enable_assembly_tests() - -# This option disables the building and running of the assembly verification tests -option(BENCHMARK_ENABLE_ASSEMBLY_TESTS "Enable building and running the assembly tests" - ${ENABLE_ASSEMBLY_TESTS_DEFAULT}) - -# Make sure we can import out CMake functions -list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules") -list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") - - -# Read the git tags to determine the project version -include(GetGitVersion) -get_git_version(GIT_VERSION) - -# Tell the user what versions we are using -string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]+" VERSION ${GIT_VERSION}) -message(STATUS "Version: ${VERSION}") - -# The version of the libraries -set(GENERIC_LIB_VERSION ${VERSION}) -string(SUBSTRING ${VERSION} 0 1 GENERIC_LIB_SOVERSION) - -# Import our CMake modules -include(CheckCXXCompilerFlag) -include(AddCXXCompilerFlag) -include(CXXFeatureCheck) - -if (BENCHMARK_BUILD_32_BITS) - add_required_cxx_compiler_flag(-m32) -endif() - -if (MSVC) - # Turn compiler warnings up to 11 - string(REGEX REPLACE "[-/]W[1-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4") - add_definitions(-D_CRT_SECURE_NO_WARNINGS) - - if (NOT BENCHMARK_ENABLE_EXCEPTIONS) - add_cxx_compiler_flag(-EHs-) - add_cxx_compiler_flag(-EHa-) - add_definitions(-D_HAS_EXCEPTIONS=0) - endif() - # Link time optimisation - if (BENCHMARK_ENABLE_LTO) - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GL") - set(CMAKE_STATIC_LINKER_FLAGS_RELEASE "${CMAKE_STATIC_LINKER_FLAGS_RELEASE} /LTCG") - set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG") - set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG") - - set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /GL") - string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO}") - set(CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO} /LTCG") - string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO}") - set(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO} /LTCG") - string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" 
CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO}") - set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} /LTCG") - - set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /GL") - set(CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL "${CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL} /LTCG") - set(CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL "${CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL} /LTCG") - set(CMAKE_EXE_LINKER_FLAGS_MINSIZEREL "${CMAKE_EXE_LINKER_FLAGS_MINSIZEREL} /LTCG") - endif() -else() - # Try and enable C++11. Don't use C++14 because it doesn't work in some - # configurations. - add_cxx_compiler_flag(-std=c++11) - if (NOT HAVE_CXX_FLAG_STD_CXX11) - add_cxx_compiler_flag(-std=c++0x) - endif() - - # Turn compiler warnings up to 11 - add_cxx_compiler_flag(-Wall) - add_cxx_compiler_flag(-Wextra) - add_cxx_compiler_flag(-Wshadow) - add_cxx_compiler_flag(-Werror RELEASE) - add_cxx_compiler_flag(-Werror RELWITHDEBINFO) - add_cxx_compiler_flag(-Werror MINSIZEREL) - # Disabled until googletest (gmock) stops emitting variadic macro warnings - #add_cxx_compiler_flag(-pedantic) - #add_cxx_compiler_flag(-pedantic-errors) - add_cxx_compiler_flag(-Wshorten-64-to-32) - add_cxx_compiler_flag(-fstrict-aliasing) - # Disable warnings regarding deprecated parts of the library while building - # and testing those parts of the library. - add_cxx_compiler_flag(-Wno-deprecated-declarations) - if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel") - # Intel silently ignores '-Wno-deprecated-declarations', - # warning no. 1786 must be explicitly disabled. - # See #631 for rationale. - add_cxx_compiler_flag(-wd1786) - endif() - # Disable deprecation warnings for release builds (when -Werror is enabled). - add_cxx_compiler_flag(-Wno-deprecated RELEASE) - add_cxx_compiler_flag(-Wno-deprecated RELWITHDEBINFO) - add_cxx_compiler_flag(-Wno-deprecated MINSIZEREL) - if (NOT BENCHMARK_ENABLE_EXCEPTIONS) - add_cxx_compiler_flag(-fno-exceptions) - endif() - - if (HAVE_CXX_FLAG_FSTRICT_ALIASING) - if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "Intel") #ICC17u2: Many false positives for Wstrict-aliasing - add_cxx_compiler_flag(-Wstrict-aliasing) - endif() - endif() - # ICC17u2: overloaded virtual function "benchmark::Fixture::SetUp" is only partially overridden - # (because of deprecated overload) - add_cxx_compiler_flag(-wd654) - add_cxx_compiler_flag(-Wthread-safety) - if (HAVE_CXX_FLAG_WTHREAD_SAFETY) - cxx_feature_check(THREAD_SAFETY_ATTRIBUTES) - endif() - - # On most UNIX like platforms g++ and clang++ define _GNU_SOURCE as a - # predefined macro, which turns on all of the wonderful libc extensions. - # However g++ doesn't do this in Cygwin so we have to define it ourselfs - # since we depend on GNU/POSIX/BSD extensions. - if (CYGWIN) - add_definitions(-D_GNU_SOURCE=1) - endif() - - if (QNXNTO) - add_definitions(-D_QNX_SOURCE) - endif() - - # Link time optimisation - if (BENCHMARK_ENABLE_LTO) - add_cxx_compiler_flag(-flto) - if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") - find_program(GCC_AR gcc-ar) - if (GCC_AR) - set(CMAKE_AR ${GCC_AR}) - endif() - find_program(GCC_RANLIB gcc-ranlib) - if (GCC_RANLIB) - set(CMAKE_RANLIB ${GCC_RANLIB}) - endif() - elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") - include(llvm-toolchain) - endif() - endif() - - # Coverage build type - set(BENCHMARK_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG}" - CACHE STRING "Flags used by the C++ compiler during coverage builds." 
- FORCE) - set(BENCHMARK_EXE_LINKER_FLAGS_COVERAGE "${CMAKE_EXE_LINKER_FLAGS_DEBUG}" - CACHE STRING "Flags used for linking binaries during coverage builds." - FORCE) - set(BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}" - CACHE STRING "Flags used by the shared libraries linker during coverage builds." - FORCE) - mark_as_advanced( - BENCHMARK_CXX_FLAGS_COVERAGE - BENCHMARK_EXE_LINKER_FLAGS_COVERAGE - BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE) - set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE}" CACHE STRING - "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Coverage.") - add_cxx_compiler_flag(--coverage COVERAGE) -endif() - -if (BENCHMARK_USE_LIBCXX) - if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") - add_cxx_compiler_flag(-stdlib=libc++) - elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" OR - "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel") - add_cxx_compiler_flag(-nostdinc++) - message(WARNING "libc++ header path must be manually specified using CMAKE_CXX_FLAGS") - # Adding -nodefaultlibs directly to CMAKE__LINKER_FLAGS will break - # configuration checks such as 'find_package(Threads)' - list(APPEND BENCHMARK_CXX_LINKER_FLAGS -nodefaultlibs) - # -lc++ cannot be added directly to CMAKE__LINKER_FLAGS because - # linker flags appear before all linker inputs and -lc++ must appear after. - list(APPEND BENCHMARK_CXX_LIBRARIES c++) - else() - message(FATAL_ERROR "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported for compiler") - endif() -endif(BENCHMARK_USE_LIBCXX) - -set(EXTRA_CXX_FLAGS "") -if (WIN32 AND "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") - # Clang on Windows fails to compile the regex feature check under C++11 - set(EXTRA_CXX_FLAGS "-DCMAKE_CXX_STANDARD=14") -endif() - -# C++ feature checks -# Determine the correct regular expression engine to use -cxx_feature_check(STD_REGEX ${EXTRA_CXX_FLAGS}) -cxx_feature_check(GNU_POSIX_REGEX ${EXTRA_CXX_FLAGS}) -cxx_feature_check(POSIX_REGEX ${EXTRA_CXX_FLAGS}) -if(NOT HAVE_STD_REGEX AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX) - message(FATAL_ERROR "Failed to determine the source files for the regular expression backend") -endif() -if (NOT BENCHMARK_ENABLE_EXCEPTIONS AND HAVE_STD_REGEX - AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX) - message(WARNING "Using std::regex with exceptions disabled is not fully supported") -endif() - -cxx_feature_check(STEADY_CLOCK) -# Ensure we have pthreads -set(THREADS_PREFER_PTHREAD_FLAG ON) -find_package(Threads REQUIRED) - -# Set up directories -include_directories(${PROJECT_SOURCE_DIR}/include) - -# Build the targets -add_subdirectory(src) - -if (BENCHMARK_ENABLE_TESTING) - enable_testing() - if (BENCHMARK_ENABLE_GTEST_TESTS AND - NOT (TARGET gtest AND TARGET gtest_main AND - TARGET gmock AND TARGET gmock_main)) - include(GoogleTest) - endif() - add_subdirectory(test) -endif() diff --git a/benchmarks/thirdparty/benchmark/CONTRIBUTING.md b/benchmarks/thirdparty/benchmark/CONTRIBUTING.md deleted file mode 100755 index 43de4c9d47..0000000000 --- a/benchmarks/thirdparty/benchmark/CONTRIBUTING.md +++ /dev/null @@ -1,58 +0,0 @@ -# How to contribute # - -We'd love to accept your patches and contributions to this project. There are -a just a few small guidelines you need to follow. - - -## Contributor License Agreement ## - -Contributions to any Google project must be accompanied by a Contributor -License Agreement. 
This is not a copyright **assignment**, it simply gives -Google permission to use and redistribute your contributions as part of the -project. - - * If you are an individual writing original source code and you're sure you - own the intellectual property, then you'll need to sign an [individual - CLA][]. - - * If you work for a company that wants to allow you to contribute your work, - then you'll need to sign a [corporate CLA][]. - -You generally only need to submit a CLA once, so if you've already submitted -one (even if it was for a different project), you probably don't need to do it -again. - -[individual CLA]: https://developers.google.com/open-source/cla/individual -[corporate CLA]: https://developers.google.com/open-source/cla/corporate - -Once your CLA is submitted (or if you already submitted one for -another Google project), make a commit adding yourself to the -[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part -of your first [pull request][]. - -[AUTHORS]: AUTHORS -[CONTRIBUTORS]: CONTRIBUTORS - - -## Submitting a patch ## - - 1. It's generally best to start by opening a new issue describing the bug or - feature you're intending to fix. Even if you think it's relatively minor, - it's helpful to know what people are working on. Mention in the initial - issue that you are planning to work on that bug or feature so that it can - be assigned to you. - - 1. Follow the normal process of [forking][] the project, and setup a new - branch to work in. It's important that each group of changes be done in - separate branches in order to ensure that a pull request only includes the - commits related to that bug or feature. - - 1. Do your best to have [well-formed commit messages][] for each change. - This provides consistency throughout the project, and ensures that commit - messages are able to be formatted properly by various git tools. - - 1. Finally, push the commits to your fork and submit a [pull request][]. - -[forking]: https://help.github.com/articles/fork-a-repo -[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html -[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/benchmarks/thirdparty/benchmark/CONTRIBUTORS b/benchmarks/thirdparty/benchmark/CONTRIBUTORS deleted file mode 100755 index 6beed7166e..0000000000 --- a/benchmarks/thirdparty/benchmark/CONTRIBUTORS +++ /dev/null @@ -1,79 +0,0 @@ -# People who have agreed to one of the CLAs and can contribute patches. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# https://developers.google.com/open-source/cla/individual -# https://developers.google.com/open-source/cla/corporate -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. -# -# Names should be added to this file as: -# Name -# -# Please keep the list sorted. 
- -Albert Pretorius -Alex Steele -Andriy Berestovskyy -Arne Beer -Billy Robert O'Neal III -Chris Kennelly -Christian Wassermann -Christopher Seymour -Colin Braley -Cyrille Faucheux -Daniel Harvey -David Coeurjolly -Deniz Evrenci -Dominic Hamon -Dominik Czarnota -Eric Backus -Eric Fiselier -Eugene Zhuk -Evgeny Safronov -Federico Ficarelli -Felix Homann -Geoffrey Martin-Noble -GergΕ‘ SzitΓ‘r -Hannes Hauswedell -Ismael Jimenez Martinez -Jern-Kuan Leong -JianXiong Zhou -Joao Paulo Magalhaes -John Millikin -Jordan Williams -Jussi Knuuttila -Kai Wolf -Kaito Udagawa -Kishan Kumar -Lei Xu -Matt Clarkson -Maxim Vafin -Nick Hutchinson -Oleksandr Sochka -Ori Livneh -Pascal Leroy -Paul Redmond -Pierre Phaneuf -Radoslav Yovchev -Raul Marin -Ray Glover -Robert Guo -Roman Lebedev -Sayan Bhattacharjee -Shuo Chen -Tobias UlvgΓ₯rd -Tom Madams -Yixuan Qiu -Yusuke Suzuki -Zbigniew Skowron diff --git a/benchmarks/thirdparty/benchmark/LICENSE b/benchmarks/thirdparty/benchmark/LICENSE deleted file mode 100755 index d645695673..0000000000 --- a/benchmarks/thirdparty/benchmark/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/benchmarks/thirdparty/benchmark/README.md b/benchmarks/thirdparty/benchmark/README.md deleted file mode 100755 index 41a1bdff75..0000000000 --- a/benchmarks/thirdparty/benchmark/README.md +++ /dev/null @@ -1,1319 +0,0 @@ -# Benchmark - -[![Build Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/google/benchmark) -[![Build status](https://ci.appveyor.com/api/projects/status/u0qsyp7t1tk7cpxs/branch/master?svg=true)](https://ci.appveyor.com/project/google/benchmark/branch/master) -[![Coverage Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/benchmark) -[![slackin](https://slackin-iqtfqnpzxd.now.sh/badge.svg)](https://slackin-iqtfqnpzxd.now.sh/) - -A library to benchmark code snippets, similar to unit tests. 
Example:
-
-```c++
-#include <benchmark/benchmark.h>
-
-static void BM_SomeFunction(benchmark::State& state) {
-  // Perform setup here
-  for (auto _ : state) {
-    // This code gets timed
-    SomeFunction();
-  }
-}
-// Register the function as a benchmark
-BENCHMARK(BM_SomeFunction);
-// Run the benchmark
-BENCHMARK_MAIN();
-```
-
-To get started, see [Requirements](#requirements) and
-[Installation](#installation). See [Usage](#usage) for a full example and the
-[User Guide](#user-guide) for a more comprehensive feature overview.
-
-It may also help to read the [Google Test documentation](https://github.com/google/googletest/blob/master/googletest/docs/primer.md)
-as some of the structural aspects of the APIs are similar.
-
-### Resources
-
-[Discussion group](https://groups.google.com/d/forum/benchmark-discuss)
-
-IRC channel: [freenode](https://freenode.net) #googlebenchmark
-
-[Additional Tooling Documentation](docs/tools.md)
-
-[Assembly Testing Documentation](docs/AssemblyTests.md)
-
-## Requirements
-
-The library can be used with C++03. However, it requires C++11 to build,
-including compiler and standard library support.
-
-The following minimum versions are required to build the library:
-
-* GCC 4.8
-* Clang 3.4
-* Visual Studio 14 2015
-* Intel 2015 Update 1
-
-See [Platform-Specific Build Instructions](#platform-specific-build-instructions).
-
-## Installation
-
-This describes the installation process using cmake. As pre-requisites, you'll
-need git and cmake installed.
-
-_See [dependencies.md](dependencies.md) for more details regarding supported
-versions of build tools._
-
-```bash
-# Check out the library.
-$ git clone https://github.com/google/benchmark.git
-# Benchmark requires Google Test as a dependency. Add the source tree as a subdirectory.
-$ git clone https://github.com/google/googletest.git benchmark/googletest
-# Go to the library root directory
-$ cd benchmark
-# Make a build directory to place the build output.
-$ cmake -E make_directory "build"
-# Generate build system files with cmake.
-$ cmake -E chdir "build" cmake -DCMAKE_BUILD_TYPE=Release ../
-# or, starting with CMake 3.13, use a simpler form:
-# cmake -DCMAKE_BUILD_TYPE=Release -S . -B "build"
-# Build the library.
-$ cmake --build "build" --config Release
-```
-This builds the `benchmark` and `benchmark_main` libraries and tests.
-On a Unix system, the build directory should now look something like this:
-
-```
-/benchmark
-  /build
-    /src
-      /libbenchmark.a
-      /libbenchmark_main.a
-    /test
-      ...
-```
-
-Next, you can run the tests to check the build.
-
-```bash
-$ cmake -E chdir "build" ctest --build-config Release
-```
-
-If you want to install the library globally, also run:
-
-```
-sudo cmake --build "build" --config Release --target install
-```
-
-Note that Google Benchmark requires Google Test to build and run the tests. This
-dependency can be provided in two ways:
-
-* Check out the Google Test sources into `benchmark/googletest` as above.
-* Otherwise, if `-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON` is specified during
-  configuration, the library will automatically download and build any required
-  dependencies.
-
-If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF`
-to `CMAKE_ARGS`.
-
-### Debug vs Release
-
-By default, benchmark builds as a debug library. You will see a warning in the
-output when this is the case. To build it as a release library instead, add
-`-DCMAKE_BUILD_TYPE=Release` when generating the build system files, as shown
-above.
-The use of `--config Release` in build commands is needed to properly
-support multi-configuration tools (like Visual Studio, for example) and can be
-skipped for other build systems (like Makefile).
-
-To enable link-time optimisation, also add `-DBENCHMARK_ENABLE_LTO=true` when
-generating the build system files.
-
-If you are using gcc, you might need to set the `GCC_AR` and `GCC_RANLIB` cmake
-cache variables if autodetection fails.
-
-If you are using clang, you may need to set the `LLVMAR_EXECUTABLE`,
-`LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
-
-### Stable and Experimental Library Versions
-
-The main branch contains the latest stable version of the benchmarking library;
-its API can be considered largely stable, with source-breaking changes
-being made only upon the release of a new major version.
-
-Newer, experimental features are implemented and tested on the
-[`v2` branch](https://github.com/google/benchmark/tree/v2). Users who wish
-to use, test, and provide feedback on the new features are encouraged to try
-this branch. However, this branch provides no stability guarantees and reserves
-the right to change and break the API at any time.
-
-## Usage
-
-### Basic usage
-
-Define a function that executes the code to measure, register it as a benchmark
-function using the `BENCHMARK` macro, and ensure an appropriate `main` function
-is available:
-
-```c++
-#include <benchmark/benchmark.h>
-
-static void BM_StringCreation(benchmark::State& state) {
-  for (auto _ : state)
-    std::string empty_string;
-}
-// Register the function as a benchmark
-BENCHMARK(BM_StringCreation);
-
-// Define another benchmark
-static void BM_StringCopy(benchmark::State& state) {
-  std::string x = "hello";
-  for (auto _ : state)
-    std::string copy(x);
-}
-BENCHMARK(BM_StringCopy);
-
-BENCHMARK_MAIN();
-```
-
-To run the benchmark, compile and link against the `benchmark` library
-(libbenchmark.a/.so). If you followed the build steps above, this library will
-be under the build directory you created.
-
-```bash
-# Example on linux after running the build steps above. Assumes the
-# `benchmark` and `build` directories are under the current directory.
-$ g++ mybenchmark.cc -std=c++11 -isystem benchmark/include \
-  -Lbenchmark/build/src -lbenchmark -lpthread -o mybenchmark
-```
-
-Alternatively, link against the `benchmark_main` library and remove
-`BENCHMARK_MAIN();` above to get the same behavior.
-
-The compiled executable will run all benchmarks by default. Pass the `--help`
-flag for option information or see the guide below.
-
-### Usage with CMake
-
-If using CMake, it is recommended to link against the project-provided
-`benchmark::benchmark` and `benchmark::benchmark_main` targets using
-`target_link_libraries`.
-It is possible to use `find_package` to import an installed version of the
-library.
-```cmake
-find_package(benchmark REQUIRED)
-```
-Alternatively, `add_subdirectory` will incorporate the library directly into
-one's CMake project.
-```cmake
-add_subdirectory(benchmark)
-```
-Either way, link to the library as follows.
-```cmake
-target_link_libraries(MyTarget benchmark::benchmark)
-```
-
-## Platform Specific Build Instructions
-
-### Building with GCC
-
-When the library is built using GCC, it is necessary to link with the pthread
-library due to how GCC implements `std::thread`. Failing to link to pthread will
-lead to runtime exceptions (unless you're using libc++), not linker errors. See
-[issue #67](https://github.com/google/benchmark/issues/67) for more details.
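-
-For instance (an illustrative sketch added by this edit, not part of the
-original README; `BM_SpinEmpty` is a hypothetical benchmark), registering a
-benchmark with the library's `Threads` option makes the runner spawn
-`std::thread` workers, which is exactly where a missing pthread link bites:
-
-```c++
-#include <benchmark/benchmark.h>
-
-// Hypothetical benchmark with an empty body kept alive by DoNotOptimize.
-static void BM_SpinEmpty(benchmark::State& state) {
-  for (auto _ : state) {
-    int x = 0;
-    benchmark::DoNotOptimize(x);  // keep the loop body from being elided
-  }
-}
-// Run the benchmark concurrently on two std::thread instances.
-BENCHMARK(BM_SpinEmpty)->Threads(2);
-BENCHMARK_MAIN();
-```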
-
-You can link to pthread by adding `-pthread` to your linker command. Note, you can
-also use `-lpthread`, but there are potential issues with the ordering of
-command-line parameters if you use that.
-
-### Building with Visual Studio 2015 or 2017
-
-The `shlwapi` library (`-lshlwapi`) is required to support a call to `CPUInfo` which reads the registry. Either add `shlwapi.lib` under `[ Configuration Properties > Linker > Input ]`, or use the following:
-
-```
-// Alternatively, can add libraries using linker options.
-#ifdef _WIN32
-#pragma comment ( lib, "Shlwapi.lib" )
-#ifdef _DEBUG
-#pragma comment ( lib, "benchmarkd.lib" )
-#else
-#pragma comment ( lib, "benchmark.lib" )
-#endif
-#endif
-```
-
-You can also use the graphical version of CMake:
-* Open `CMake GUI`.
-* Under `Where to build the binaries`, same path as source plus `build`.
-* Under `CMAKE_INSTALL_PREFIX`, same path as source plus `install`.
-* Click `Configure`, `Generate`, `Open Project`.
-* If the build fails, try deleting the entire directory and starting again, or unticking options to build less.
-
-### Building with Intel 2015 Update 1 or Intel System Studio Update 4
-
-See instructions for building with Visual Studio. Once built, right-click on the solution and change the build to Intel.
-
-### Building on Solaris
-
-If you're running benchmarks on Solaris, you'll want the kstat library linked in
-too (`-lkstat`).
-
-## User Guide
-
-### Command Line
-
-[Output Formats](#output-formats)
-
-[Output Files](#output-files)
-
-[Running Benchmarks](#running-benchmarks)
-
-[Running a Subset of Benchmarks](#running-a-subset-of-benchmarks)
-
-[Result Comparison](#result-comparison)
-
-### Library
-
-[Runtime and Reporting Considerations](#runtime-and-reporting-considerations)
-
-[Passing Arguments](#passing-arguments)
-
-[Calculating Asymptotic Complexity](#asymptotic-complexity)
-
-[Templated Benchmarks](#templated-benchmarks)
-
-[Fixtures](#fixtures)
-
-[Custom Counters](#custom-counters)
-
-[Multithreaded Benchmarks](#multithreaded-benchmarks)
-
-[CPU Timers](#cpu-timers)
-
-[Manual Timing](#manual-timing)
-
-[Setting the Time Unit](#setting-the-time-unit)
-
-[Preventing Optimization](#preventing-optimization)
-
-[Reporting Statistics](#reporting-statistics)
-
-[Custom Statistics](#custom-statistics)
-
-[Using RegisterBenchmark](#using-register-benchmark)
-
-[Exiting with an Error](#exiting-with-an-error)
-
-[A Faster KeepRunning Loop](#a-faster-keep-running-loop)
-
-[Disabling CPU Frequency Scaling](#disabling-cpu-frequency-scaling)
-
-
-<a name="output-formats" />
-
-### Output Formats
-
-The library supports multiple output formats. Use the
-`--benchmark_format=<console|json|csv>` flag (or set the
-`BENCHMARK_FORMAT=<console|json|csv>` environment variable) to set
-the format type. `console` is the default format.
-
-The Console format is intended to be a human readable format. By default
-the format generates color output. Context is output on stderr and the
-tabular data on stdout. Example tabular output looks like:
-
-```
-Benchmark                               Time(ns)    CPU(ns) Iterations
-----------------------------------------------------------------------
-BM_SetInsert/1024/1                        28928      29349      23853  133.097kB/s   33.2742k items/s
-BM_SetInsert/1024/8                        32065      32913      21375  949.487kB/s   237.372k items/s
-BM_SetInsert/1024/10                       33157      33648      21431  1.13369MB/s   290.225k items/s
-```
-
-The JSON format outputs human-readable JSON split into two top-level attributes.
-The `context` attribute contains information about the run in general, including
-information about the CPU and the date.
-The `benchmarks` attribute contains a list of every benchmark run. Example JSON
-output looks like:
-
-```json
-{
-  "context": {
-    "date": "2015/03/17-18:40:25",
-    "num_cpus": 40,
-    "mhz_per_cpu": 2801,
-    "cpu_scaling_enabled": false,
-    "build_type": "debug"
-  },
-  "benchmarks": [
-    {
-      "name": "BM_SetInsert/1024/1",
-      "iterations": 94877,
-      "real_time": 29275,
-      "cpu_time": 29836,
-      "bytes_per_second": 134066,
-      "items_per_second": 33516
-    },
-    {
-      "name": "BM_SetInsert/1024/8",
-      "iterations": 21609,
-      "real_time": 32317,
-      "cpu_time": 32429,
-      "bytes_per_second": 986770,
-      "items_per_second": 246693
-    },
-    {
-      "name": "BM_SetInsert/1024/10",
-      "iterations": 21393,
-      "real_time": 32724,
-      "cpu_time": 33355,
-      "bytes_per_second": 1199226,
-      "items_per_second": 299807
-    }
-  ]
-}
-```
-
-The CSV format outputs comma-separated values. The `context` is output on stderr
-and the CSV itself on stdout. Example CSV output looks like:
-
-```
-name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label
-"BM_SetInsert/1024/1",65465,17890.7,8407.45,475768,118942,
-"BM_SetInsert/1024/8",116606,18810.1,9766.64,3.27646e+06,819115,
-"BM_SetInsert/1024/10",106365,17238.4,8421.53,4.74973e+06,1.18743e+06,
-```
-
-<a name="output-files" />
-
-### Output Files
-
-Write benchmark results to a file with the `--benchmark_out=<filename>` option
-(or set `BENCHMARK_OUT`). Specify the output format with
-`--benchmark_out_format={json|console|csv}` (or set
-`BENCHMARK_OUT_FORMAT={json|console|csv}`). Note that specifying
-`--benchmark_out` does not suppress the console output.
-
-<a name="running-benchmarks" />
-
-### Running Benchmarks
-
-Benchmarks are executed by running the produced binaries. Benchmark binaries,
-by default, accept options that may be specified either through their command
-line interface or by setting environment variables before execution. For every
-`--option_flag=<value>` CLI switch, a corresponding environment variable
-`OPTION_FLAG=<value>` exists and is used as the default if set (CLI switches
-always prevail). A complete list of CLI options is available by running
-benchmarks with the `--help` switch.
-
-<a name="running-a-subset-of-benchmarks" />
-
-### Running a Subset of Benchmarks
-
-The `--benchmark_filter=<regex>` option (or `BENCHMARK_FILTER=<regex>`
-environment variable) can be used to only run the benchmarks that match
-the specified `<regex>`. For example:
-
-```bash
-$ ./run_benchmarks.x --benchmark_filter=BM_memcpy/32
-Run on (1 X 2300 MHz CPU )
-2016-06-25 19:34:24
-Benchmark              Time           CPU Iterations
-----------------------------------------------------
-BM_memcpy/32          11 ns         11 ns   79545455
-BM_memcpy/32k       2181 ns       2185 ns     324074
-BM_memcpy/32          12 ns         12 ns   54687500
-BM_memcpy/32k       1834 ns       1837 ns     357143
-```
-
-<a name="result-comparison" />
-
-### Result comparison
-
-It is possible to compare the benchmarking results.
-See [Additional Tooling Documentation](docs/tools.md)
-
-<a name="runtime-and-reporting-considerations" />
-
-### Runtime and Reporting Considerations
-
-When the benchmark binary is executed, each benchmark function is run serially.
-The number of iterations to run is determined dynamically by running the
-benchmark a few times and measuring the time taken and ensuring that the
-ultimate result will be statistically stable. As such, faster benchmark
-functions will be run for more iterations than slower benchmark functions, and
-the number of iterations is thus reported.
-
-In all cases, the number of iterations for which the benchmark is run is
-governed by the amount of time the benchmark takes. Concretely, the number of
-iterations is at least one, not more than 1e9, until CPU time is greater than
-the minimum time, or the wallclock time is 5x minimum time. The minimum time is
-set per benchmark by calling `MinTime` on the registered benchmark object.
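-
-As a quick illustration (an editor's sketch, not from the original README;
-`BM_MyBenchmark` is a hypothetical benchmark), both the minimum time and the
-repetition count described below can be set at registration time:
-
-```c++
-// Hypothetical benchmark used only to show the registration options.
-static void BM_MyBenchmark(benchmark::State& state) {
-  for (auto _ : state) {
-    // code under test
-  }
-}
-// Require at least 2 seconds of benchmarking before results are reported,
-// and repeat the whole measurement 10 times for statistical reporting.
-BENCHMARK(BM_MyBenchmark)->MinTime(2.0)->Repetitions(10);
-```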
-
-Average timings are then reported over the iterations run. If multiple
-repetitions are requested using the `--benchmark_repetitions` command-line
-option, or at registration time, the benchmark function will be run several
-times and statistical results across these repetitions will also be reported.
-
-As well as the per-benchmark entries, a preamble in the report will include
-information about the machine on which the benchmarks are run.
-
-<a name="passing-arguments" />
-
-### Passing Arguments
-
-Sometimes a family of benchmarks can be implemented with just one routine that
-takes an extra argument to specify which one of the family of benchmarks to
-run. For example, the following code defines a family of benchmarks for
-measuring the speed of `memcpy()` calls of different lengths:
-
-```c++
-static void BM_memcpy(benchmark::State& state) {
-  char* src = new char[state.range(0)];
-  char* dst = new char[state.range(0)];
-  memset(src, 'x', state.range(0));
-  for (auto _ : state)
-    memcpy(dst, src, state.range(0));
-  state.SetBytesProcessed(int64_t(state.iterations()) *
-                          int64_t(state.range(0)));
-  delete[] src;
-  delete[] dst;
-}
-BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
-```
-
-The preceding code is quite repetitive, and can be replaced with the following
-short-hand. The following invocation will pick a few appropriate arguments in
-the specified range and will generate a benchmark for each such argument.
-
-```c++
-BENCHMARK(BM_memcpy)->Range(8, 8<<10);
-```
-
-By default the arguments in the range are generated in multiples of eight and
-the command above selects [ 8, 64, 512, 4k, 8k ]. In the following code the
-range multiplier is changed to multiples of two.
-
-```c++
-BENCHMARK(BM_memcpy)->RangeMultiplier(2)->Range(8, 8<<10);
-```
-
-Now arguments generated are [ 8, 16, 32, 64, 128, 256, 512, 1024, 2k, 4k, 8k ].
-
-The preceding code shows a method of defining a sparse range. The following
-example shows a method of defining a dense range. It is then used to benchmark
-the performance of `std::vector` initialization for uniformly increasing sizes.
-
-```c++
-static void BM_DenseRange(benchmark::State& state) {
-  for (auto _ : state) {
-    std::vector<int> v(state.range(0), state.range(0));
-    benchmark::DoNotOptimize(v.data());
-    benchmark::ClobberMemory();
-  }
-}
-BENCHMARK(BM_DenseRange)->DenseRange(0, 1024, 128);
-```
-
-Now arguments generated are [ 0, 128, 256, 384, 512, 640, 768, 896, 1024 ].
-
-You might have a benchmark that depends on two or more inputs. For example, the
-following code defines a family of benchmarks for measuring the speed of set
-insertion.
-
-```c++
-static void BM_SetInsert(benchmark::State& state) {
-  std::set<int> data;
-  for (auto _ : state) {
-    state.PauseTiming();
-    data = ConstructRandomSet(state.range(0));
-    state.ResumeTiming();
-    for (int j = 0; j < state.range(1); ++j)
-      data.insert(RandomNumber());
-  }
-}
-BENCHMARK(BM_SetInsert)
-    ->Args({1<<10, 128})
-    ->Args({2<<10, 128})
-    ->Args({4<<10, 128})
-    ->Args({8<<10, 128})
-    ->Args({1<<10, 512})
-    ->Args({2<<10, 512})
-    ->Args({4<<10, 512})
-    ->Args({8<<10, 512});
-```
-
-The preceding code is quite repetitive, and can be replaced with the following
-short-hand. The following macro will pick a few appropriate arguments in the
-product of the two specified ranges and will generate a benchmark for each such
-pair.
-
-Spelling out every `Args` pair, as in the `BM_SetInsert` registration above, is
-quite repetitive. It can be replaced with the following short-hand, which will
-pick a few appropriate arguments in the product of the two specified ranges and
-will generate a benchmark for each such pair.
-
-```c++
-BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
-```
-
-Some benchmarks may require specific argument values that cannot be expressed
-with `Ranges`. In this case, `ArgsProduct` offers the ability to generate a
-benchmark input for each combination in the product of the supplied vectors.
-
-```c++
-BENCHMARK(BM_SetInsert)
-    ->ArgsProduct({{1<<10, 3<<10, 8<<10}, {20, 40, 60, 80}})
-// would generate the same benchmark arguments as
-BENCHMARK(BM_SetInsert)
-    ->Args({1<<10, 20})
-    ->Args({3<<10, 20})
-    ->Args({8<<10, 20})
-    ->Args({1<<10, 40})
-    ->Args({3<<10, 40})
-    ->Args({8<<10, 40})
-    ->Args({1<<10, 60})
-    ->Args({3<<10, 60})
-    ->Args({8<<10, 60})
-    ->Args({1<<10, 80})
-    ->Args({3<<10, 80})
-    ->Args({8<<10, 80});
-```
-
-For more complex patterns of inputs, passing a custom function to `Apply` allows
-programmatic specification of an arbitrary set of arguments on which to run the
-benchmark. The following example enumerates a dense range on one parameter,
-and a sparse range on the second.
-
-```c++
-static void CustomArguments(benchmark::internal::Benchmark* b) {
-  for (int i = 0; i <= 10; ++i)
-    for (int j = 32; j <= 1024*1024; j *= 8)
-      b->Args({i, j});
-}
-BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
-```
-
-#### Passing Arbitrary Arguments to a Benchmark
-
-In C++11 it is possible to define a benchmark that takes an arbitrary number
-of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)`
-macro creates a benchmark that invokes `func` with the `benchmark::State` as
-the first argument followed by the specified `args...`.
-The `test_case_name` is appended to the name of the benchmark and
-should describe the values passed.
-
-```c++
-template <class ...ExtraArgs>
-void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
-  [...]
-}
-// Registers a benchmark named "BM_takes_args/int_string_test" that passes
-// the specified values to `extra_args`.
-BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
-```
-
-Note that elements of `...args` may refer to global variables. Users should
-avoid modifying global state inside of a benchmark.
-
-### Calculating Asymptotic Complexity (Big O)
-
-Asymptotic complexity might be calculated for a family of benchmarks. The
-following code will calculate the coefficient for the high-order term in the
-running time and the normalized root-mean-square error of string comparison.
-
-```c++
-static void BM_StringCompare(benchmark::State& state) {
-  std::string s1(state.range(0), '-');
-  std::string s2(state.range(0), '-');
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(s1.compare(s2));
-  }
-  state.SetComplexityN(state.range(0));
-}
-BENCHMARK(BM_StringCompare)
-    ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::oN);
-```
-
-As shown in the following invocation, asymptotic complexity might also be
-calculated automatically.
-
-```c++
-BENCHMARK(BM_StringCompare)
-    ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity();
-```
-
-The following code will specify asymptotic complexity with a lambda function
-that can be used to customize the high-order term calculation.
-
-```c++
-BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
-    ->Range(1<<10, 1<<18)->Complexity([](benchmark::IterationCount n)->double{return n; });
-```
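-
-When a complexity is requested, the console reporter appends two synthetic rows
-per benchmark family: the fitted high-order coefficient and the normalized
-root-mean-square error. Schematically, the extra output looks like the
-following (the numbers are illustrative, not measured):
-
-```
-BM_StringCompare_BigO       2.45 N        2.45 N
-BM_StringCompare_RMS            3 %           3 %
-```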
-
-### Templated Benchmarks
-
-This example produces and consumes messages of size `sizeof(v)` `range_x`
-times. It also outputs throughput in the absence of multiprogramming.
-
-```c++
-template <class Q> void BM_Sequential(benchmark::State& state) {
-  Q q;
-  typename Q::value_type v;
-  for (auto _ : state) {
-    for (int i = state.range(0); i--; )
-      q.push(v);
-    for (int e = state.range(0); e--; )
-      q.Wait(&v);
-  }
-  // actually messages, not bytes:
-  state.SetBytesProcessed(
-      static_cast<int64_t>(state.iterations())*state.range(0));
-}
-BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
-```
-
-Three macros are provided for adding benchmark templates.
-
-```c++
-#ifdef BENCHMARK_HAS_CXX11
-#define BENCHMARK_TEMPLATE(func, ...) // Takes any number of parameters.
-#else // C++ < C++11
-#define BENCHMARK_TEMPLATE(func, arg1)
-#endif
-#define BENCHMARK_TEMPLATE1(func, arg1)
-#define BENCHMARK_TEMPLATE2(func, arg1, arg2)
-```
-
-### Fixtures
-
-Fixture tests are created by first defining a type that derives from
-`::benchmark::Fixture` and then creating/registering the tests using the
-following macros:
-
-* `BENCHMARK_F(ClassName, Method)`
-* `BENCHMARK_DEFINE_F(ClassName, Method)`
-* `BENCHMARK_REGISTER_F(ClassName, Method)`
-
-For example:
-
-```c++
-class MyFixture : public benchmark::Fixture {
-public:
-  void SetUp(const ::benchmark::State& state) {
-  }
-
-  void TearDown(const ::benchmark::State& state) {
-  }
-};
-
-BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
-  for (auto _ : st) {
-    ...
-  }
-}
-
-BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) {
-  for (auto _ : st) {
-    ...
-  }
-}
-/* BarTest is NOT registered */
-BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
-/* BarTest is now registered */
-```
-
-#### Templated Fixtures
-
-You can also create templated fixtures by using the following macros:
-
-* `BENCHMARK_TEMPLATE_F(ClassName, Method, ...)`
-* `BENCHMARK_TEMPLATE_DEFINE_F(ClassName, Method, ...)`
-
-For example:
-
-```c++
-template <typename T>
-class MyFixture : public benchmark::Fixture {};
-
-BENCHMARK_TEMPLATE_F(MyFixture, IntTest, int)(benchmark::State& st) {
-  for (auto _ : st) {
-    ...
-  }
-}
-
-BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, DoubleTest, double)(benchmark::State& st) {
-  for (auto _ : st) {
-    ...
-  }
-}
-
-BENCHMARK_REGISTER_F(MyFixture, DoubleTest)->Threads(2);
-```
-
-### Custom Counters
-
-You can add your own counters with user-defined names. The example below
-will add columns "Foo", "Bar" and "Baz" in its output:
-
-```c++
-static void UserCountersExample1(benchmark::State& state) {
-  double numFoos = 0, numBars = 0, numBazs = 0;
-  for (auto _ : state) {
-    // ... count Foo,Bar,Baz events
-  }
-  state.counters["Foo"] = numFoos;
-  state.counters["Bar"] = numBars;
-  state.counters["Baz"] = numBazs;
-}
-```
-
-The `state.counters` object is a `std::map` with `std::string` keys
-and `Counter` values. The latter is a `double`-like class, via an implicit
-conversion to `double&`. Thus you can use all of the standard arithmetic
-assignment operators (`=,+=,-=,*=,/=`) to change the value of each counter.
-
-In multithreaded benchmarks, each counter is set on the calling thread only.
-When the benchmark finishes, the counters from each thread will be summed;
-the resulting sum is the value which will be shown for the benchmark.
-
-The `Counter` constructor accepts three parameters: the value as a `double`;
-a bit flag which allows you to show counters as rates, and/or as per-thread
-iteration, and/or as per-thread averages, and/or iteration invariants,
-and/or finally inverting the result; and a flag specifying the 'unit' - i.e.
-is 1k a 1000 (default, `benchmark::Counter::OneK::kIs1000`), or 1024 -(`benchmark::Counter::OneK::kIs1024`)? - -```c++ - // sets a simple counter - state.counters["Foo"] = numFoos; - - // Set the counter as a rate. It will be presented divided - // by the duration of the benchmark. - // Meaning: per one second, how many 'foo's are processed? - state.counters["FooRate"] = Counter(numFoos, benchmark::Counter::kIsRate); - - // Set the counter as a rate. It will be presented divided - // by the duration of the benchmark, and the result inverted. - // Meaning: how many seconds it takes to process one 'foo'? - state.counters["FooInvRate"] = Counter(numFoos, benchmark::Counter::kIsRate | benchmark::Counter::kInvert); - - // Set the counter as a thread-average quantity. It will - // be presented divided by the number of threads. - state.counters["FooAvg"] = Counter(numFoos, benchmark::Counter::kAvgThreads); - - // There's also a combined flag: - state.counters["FooAvgRate"] = Counter(numFoos,benchmark::Counter::kAvgThreadsRate); - - // This says that we process with the rate of state.range(0) bytes every iteration: - state.counters["BytesProcessed"] = Counter(state.range(0), benchmark::Counter::kIsIterationInvariantRate, benchmark::Counter::OneK::kIs1024); -``` - -When you're compiling in C++11 mode or later you can use `insert()` with -`std::initializer_list`: - -```c++ - // With C++11, this can be done: - state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}, {"Baz", numBazs}}); - // ... instead of: - state.counters["Foo"] = numFoos; - state.counters["Bar"] = numBars; - state.counters["Baz"] = numBazs; -``` - -#### Counter Reporting - -When using the console reporter, by default, user counters are printed at -the end after the table, the same way as ``bytes_processed`` and -``items_processed``. This is best for cases in which there are few counters, -or where there are only a couple of lines per benchmark. Here's an example of -the default output: - -``` ------------------------------------------------------------------------------- -Benchmark Time CPU Iterations UserCounters... ------------------------------------------------------------------------------- -BM_UserCounter/threads:8 2248 ns 10277 ns 68808 Bar=16 Bat=40 Baz=24 Foo=8 -BM_UserCounter/threads:1 9797 ns 9788 ns 71523 Bar=2 Bat=5 Baz=3 Foo=1024m -BM_UserCounter/threads:2 4924 ns 9842 ns 71036 Bar=4 Bat=10 Baz=6 Foo=2 -BM_UserCounter/threads:4 2589 ns 10284 ns 68012 Bar=8 Bat=20 Baz=12 Foo=4 -BM_UserCounter/threads:8 2212 ns 10287 ns 68040 Bar=16 Bat=40 Baz=24 Foo=8 -BM_UserCounter/threads:16 1782 ns 10278 ns 68144 Bar=32 Bat=80 Baz=48 Foo=16 -BM_UserCounter/threads:32 1291 ns 10296 ns 68256 Bar=64 Bat=160 Baz=96 Foo=32 -BM_UserCounter/threads:4 2615 ns 10307 ns 68040 Bar=8 Bat=20 Baz=12 Foo=4 -BM_Factorial 26 ns 26 ns 26608979 40320 -BM_Factorial/real_time 26 ns 26 ns 26587936 40320 -BM_CalculatePiRange/1 16 ns 16 ns 45704255 0 -BM_CalculatePiRange/8 73 ns 73 ns 9520927 3.28374 -BM_CalculatePiRange/64 609 ns 609 ns 1140647 3.15746 -BM_CalculatePiRange/512 4900 ns 4901 ns 142696 3.14355 -``` - -If this doesn't suit you, you can print each counter as a table column by -passing the flag `--benchmark_counters_tabular=true` to the benchmark -application. This is best for cases in which there are a lot of counters, or -a lot of lines per individual benchmark. Note that this will trigger a -reprinting of the table header any time the counter set changes between -individual benchmarks. 
Here's an example of corresponding output when -`--benchmark_counters_tabular=true` is passed: - -``` ---------------------------------------------------------------------------------------- -Benchmark Time CPU Iterations Bar Bat Baz Foo ---------------------------------------------------------------------------------------- -BM_UserCounter/threads:8 2198 ns 9953 ns 70688 16 40 24 8 -BM_UserCounter/threads:1 9504 ns 9504 ns 73787 2 5 3 1 -BM_UserCounter/threads:2 4775 ns 9550 ns 72606 4 10 6 2 -BM_UserCounter/threads:4 2508 ns 9951 ns 70332 8 20 12 4 -BM_UserCounter/threads:8 2055 ns 9933 ns 70344 16 40 24 8 -BM_UserCounter/threads:16 1610 ns 9946 ns 70720 32 80 48 16 -BM_UserCounter/threads:32 1192 ns 9948 ns 70496 64 160 96 32 -BM_UserCounter/threads:4 2506 ns 9949 ns 70332 8 20 12 4 --------------------------------------------------------------- -Benchmark Time CPU Iterations --------------------------------------------------------------- -BM_Factorial 26 ns 26 ns 26392245 40320 -BM_Factorial/real_time 26 ns 26 ns 26494107 40320 -BM_CalculatePiRange/1 15 ns 15 ns 45571597 0 -BM_CalculatePiRange/8 74 ns 74 ns 9450212 3.28374 -BM_CalculatePiRange/64 595 ns 595 ns 1173901 3.15746 -BM_CalculatePiRange/512 4752 ns 4752 ns 147380 3.14355 -BM_CalculatePiRange/4k 37970 ns 37972 ns 18453 3.14184 -BM_CalculatePiRange/32k 303733 ns 303744 ns 2305 3.14162 -BM_CalculatePiRange/256k 2434095 ns 2434186 ns 288 3.1416 -BM_CalculatePiRange/1024k 9721140 ns 9721413 ns 71 3.14159 -BM_CalculatePi/threads:8 2255 ns 9943 ns 70936 -``` - -Note above the additional header printed when the benchmark changes from -``BM_UserCounter`` to ``BM_Factorial``. This is because ``BM_Factorial`` does -not have the same counter set as ``BM_UserCounter``. - - - -### Multithreaded Benchmarks - -In a multithreaded test (benchmark invoked by multiple threads simultaneously), -it is guaranteed that none of the threads will start until all have reached -the start of the benchmark loop, and all will have finished before any thread -exits the benchmark loop. (This behavior is also provided by the `KeepRunning()` -API) As such, any global setup or teardown can be wrapped in a check against the thread -index: - -```c++ -static void BM_MultiThreaded(benchmark::State& state) { - if (state.thread_index == 0) { - // Setup code here. - } - for (auto _ : state) { - // Run the test as normal. - } - if (state.thread_index == 0) { - // Teardown code here. - } -} -BENCHMARK(BM_MultiThreaded)->Threads(2); -``` - -If the benchmarked code itself uses threads and you want to compare it to -single-threaded code, you may want to use real-time ("wallclock") measurements -for latency comparisons: - -```c++ -BENCHMARK(BM_test)->Range(8, 8<<10)->UseRealTime(); -``` - -Without `UseRealTime`, CPU time is used by default. - - - -### CPU Timers - -By default, the CPU timer only measures the time spent by the main thread. -If the benchmark itself uses threads internally, this measurement may not -be what you are looking for. Instead, there is a way to measure the total -CPU usage of the process, by all the threads. - -```c++ -void callee(int i); - -static void MyMain(int size) { -#pragma omp parallel for - for(int i = 0; i < size; i++) - callee(i); -} - -static void BM_OpenMP(benchmark::State& state) { - for (auto _ : state) - MyMain(state.range(0)); -} - -// Measure the time spent by the main thread, use it to decide for how long to -// run the benchmark loop. 
Depending on the internal implementation, this may
-// measure anywhere from near-zero (the overhead spent before/after work
-// handoff to worker thread[s]) to the whole single-thread time.
-BENCHMARK(BM_OpenMP)->Range(8, 8<<10);
-
-// Measure the user-visible time, the wall clock (literally, the time that
-// has passed on the clock on the wall), and use it to decide for how long to
-// run the benchmark loop. This will always be meaningful, and will match the
-// time spent by the main thread in the single-threaded case, in general
-// decreasing with the number of internal threads doing the work.
-BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->UseRealTime();
-
-// Measure the total CPU consumption, and use it to decide for how long to
-// run the benchmark loop. This will always measure no less than the
-// time spent by the main thread in the single-threaded case.
-BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->MeasureProcessCPUTime();
-
-// A mixture of the last two. Measure the total CPU consumption, but use the
-// wall clock to decide for how long to run the benchmark loop.
-BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->MeasureProcessCPUTime()->UseRealTime();
-```
-
-#### Controlling Timers
-
-Normally, the entire duration of the work loop (`for (auto _ : state) {}`)
-is measured. But sometimes, it is necessary to do some work inside of
-that loop, every iteration, without counting that time toward the benchmark
-time. That is possible, although it is not recommended, since it has high
-overhead.
-
-```c++
-static void BM_SetInsert_With_Timer_Control(benchmark::State& state) {
-  std::set<int> data;
-  for (auto _ : state) {
-    state.PauseTiming(); // Stop timers. They will not count until they are resumed.
-    data = ConstructRandomSet(state.range(0)); // Do something that should not be measured.
-    state.ResumeTiming(); // And resume timers. They are now counting again.
-    // The rest will be measured.
-    for (int j = 0; j < state.range(1); ++j)
-      data.insert(RandomNumber());
-  }
-}
-BENCHMARK(BM_SetInsert_With_Timer_Control)->Ranges({{1<<10, 8<<10}, {128, 512}});
-```
-
-### Manual Timing
-
-For benchmarking something for which neither CPU time nor real-time are
-correct or accurate enough, completely manual timing is supported using
-the `UseManualTime` function.
-
-When `UseManualTime` is used, the benchmarked code must call
-`SetIterationTime` once per iteration of the benchmark loop to
-report the manually measured time.
-
-An example use case for this is benchmarking GPU execution (e.g. OpenCL
-or CUDA kernels, OpenGL or Vulkan or Direct3D draw calls), which cannot
-be accurately measured using CPU time or real-time. Instead, they can be
-measured accurately using a dedicated API, and these measurement results
-can be reported back with `SetIterationTime`.
-
-```c++
-static void BM_ManualTiming(benchmark::State& state) {
-  int microseconds = state.range(0);
-  std::chrono::duration<double, std::micro> sleep_duration {
-    static_cast<double>(microseconds)
-  };
-
-  for (auto _ : state) {
-    auto start = std::chrono::high_resolution_clock::now();
-    // Simulate some useful workload with a sleep
-    std::this_thread::sleep_for(sleep_duration);
-    auto end = std::chrono::high_resolution_clock::now();
-
-    auto elapsed_seconds =
-        std::chrono::duration_cast<std::chrono::duration<double>>(
-            end - start);
-
-    state.SetIterationTime(elapsed_seconds.count());
-  }
-}
-BENCHMARK(BM_ManualTiming)->Range(1, 1<<17)->UseManualTime();
-```
-
-### Setting the Time Unit
-
-If a benchmark runs for a few milliseconds, it may be hard to visually compare
-the measured times, since the output data is given in nanoseconds by default.
-The time unit can be set explicitly:
-
-```c++
-BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
-```
-
-### Preventing Optimization
-
-To prevent a value or expression from being optimized away by the compiler,
-the `benchmark::DoNotOptimize(...)` and `benchmark::ClobberMemory()`
-functions can be used.
-
-```c++
-static void BM_test(benchmark::State& state) {
-  for (auto _ : state) {
-    int x = 0;
-    for (int i=0; i < 64; ++i) {
-      benchmark::DoNotOptimize(x += i);
-    }
-  }
-}
-```
-
-`DoNotOptimize(<expr>)` forces the *result* of `<expr>` to be stored in either
-memory or a register. For GNU based compilers it acts as a read/write barrier
-for global memory. More specifically it forces the compiler to flush pending
-writes to memory and reload any other values as necessary.
-
-Note that `DoNotOptimize(<expr>)` does not prevent optimizations on `<expr>`
-in any way. `<expr>` may even be removed entirely when the result is already
-known. For example:
-
-```c++
-  /* Example 1: `<expr>` is removed entirely. */
-  int foo(int x) { return x + 42; }
-  while (...) DoNotOptimize(foo(0)); // Optimized to DoNotOptimize(42);
-
-  /* Example 2: Result of `<expr>` is only reused. */
-  int bar(int) __attribute__((const));
-  while (...) DoNotOptimize(bar(0)); // Optimized to:
-  // int __result__ = bar(0);
-  // while (...) DoNotOptimize(__result__);
-```
-
-The second tool for preventing optimizations is `ClobberMemory()`. In essence
-`ClobberMemory()` forces the compiler to perform all pending writes to global
-memory. Memory managed by block scope objects must be "escaped" using
-`DoNotOptimize(...)` before it can be clobbered. In the below example
-`ClobberMemory()` prevents the call to `v.push_back(42)` from being optimized
-away.
-
-```c++
-static void BM_vector_push_back(benchmark::State& state) {
-  for (auto _ : state) {
-    std::vector<int> v;
-    v.reserve(1);
-    benchmark::DoNotOptimize(v.data()); // Allow v.data() to be clobbered.
-    v.push_back(42);
-    benchmark::ClobberMemory(); // Force 42 to be written to memory.
-  }
-}
-```
-
-Note that `ClobberMemory()` is only available for GNU or MSVC based compilers.
-
-### Statistics: Reporting the Mean, Median and Standard Deviation of Repeated Benchmarks
-
-By default each benchmark is run once and that single result is reported.
-However benchmarks are often noisy and a single result may not be representative
-of the overall behavior. For this reason it's possible to repeatedly rerun the
-benchmark.
-
-The number of runs of each benchmark is specified globally by the
-`--benchmark_repetitions` flag, or on a per-benchmark basis by calling
-`Repetitions` on the registered benchmark object. When a benchmark is run more
-than once, the mean, median and standard deviation of the runs will be
-reported.
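-
-As a minimal sketch (again with a placeholder benchmark `BM_test`):
-
-```c++
-// Run the whole benchmark 10 times; mean, median and standard deviation
-// rows are then added to the report for this benchmark.
-BENCHMARK(BM_test)->Repetitions(10);
-```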
-
-Additionally the `--benchmark_report_aggregates_only={true|false}` and
-`--benchmark_display_aggregates_only={true|false}` flags, or the
-`ReportAggregatesOnly(bool)` and `DisplayAggregatesOnly(bool)` functions, can
-be used to change how repeated tests are reported. By default the result of
-each repeated run is reported. When the `report aggregates only` option is
-`true`, only the aggregates (i.e. mean, median and standard deviation, plus
-complexity measurements if they were requested) of the runs are reported, to
-both reporters - the standard output (console) and the file.
-When only the `display aggregates only` option is `true`,
-only the aggregates are displayed on the standard output, while the file
-output still contains everything.
-Calling `ReportAggregatesOnly(bool)` / `DisplayAggregatesOnly(bool)` on a
-registered benchmark object overrides the value of the appropriate flag for
-that benchmark.
-
-### Custom Statistics
-
-While having the mean, median and standard deviation is nice, this may not be
-enough for everyone. For example you may want to know what the largest
-observation is, e.g. because you have some real-time constraints. This is easy.
-The following code will specify a custom statistic to be calculated, defined
-by a lambda function.
-
-```c++
-void BM_spin_empty(benchmark::State& state) {
-  for (auto _ : state) {
-    for (int x = 0; x < state.range(0); ++x) {
-      benchmark::DoNotOptimize(x);
-    }
-  }
-}
-
-BENCHMARK(BM_spin_empty)
-    ->ComputeStatistics("max", [](const std::vector<double>& v) -> double {
-      return *(std::max_element(std::begin(v), std::end(v)));
-    })
-    ->Arg(512);
-```
-
-### Using RegisterBenchmark(name, fn, args...)
-
-The `RegisterBenchmark(name, func, args...)` function provides an alternative
-way to create and register benchmarks.
-`RegisterBenchmark(name, func, args...)` creates, registers, and returns a
-pointer to a new benchmark with the specified `name` that invokes
-`func(st, args...)` where `st` is a `benchmark::State` object.
-
-Unlike the `BENCHMARK` registration macros, which can only be used at global
-scope, `RegisterBenchmark` can be called anywhere. This allows for
-benchmark tests to be registered programmatically.
-
-Additionally, `RegisterBenchmark` allows any callable object to be registered
-as a benchmark, including capturing lambdas and function objects.
-
-For example:
-
-```c++
-auto BM_test = [](benchmark::State& st, auto Inputs) { /* ... */ };
-
-int main(int argc, char** argv) {
-  for (auto& test_input : { /* ... */ })
-    benchmark::RegisterBenchmark(test_input.name(), BM_test, test_input);
-  benchmark::Initialize(&argc, argv);
-  benchmark::RunSpecifiedBenchmarks();
-}
-```
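-
-When no programmatic registration is needed, the same initialize-and-run
-boilerplate is available as a single macro:
-
-```c++
-// Expands to a main() that calls benchmark::Initialize and
-// benchmark::RunSpecifiedBenchmarks.
-BENCHMARK_MAIN();
-```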
-
-### Exiting with an Error
-
-When errors caused by external influences, such as file I/O and network
-communication, occur within a benchmark, the
-`State::SkipWithError(const char* msg)` function can be used to skip that run
-of the benchmark and report the error. Note that only future iterations of
-`KeepRunning()` are skipped. For the ranged-for version of the benchmark loop,
-users must explicitly exit the loop, otherwise all iterations will be performed.
-Users may explicitly return to exit the benchmark immediately.
-
-The `SkipWithError(...)` function may be used at any point within the benchmark,
-including before and after the benchmark loop. Moreover, if `SkipWithError(...)`
-has been used, it is not required to reach the benchmark loop and one may return
-from the benchmark function early.
-
-For example:
-
-```c++
-static void BM_test(benchmark::State& state) {
-  auto resource = GetResource();
-  if (!resource.good()) {
-    state.SkipWithError("Resource is not good!");
-    // KeepRunning() loop will not be entered.
-  }
-  while (state.KeepRunning()) {
-    auto data = resource.read_data();
-    if (!resource.good()) {
-      state.SkipWithError("Failed to read data!");
-      break; // Needed to skip the rest of the iteration.
-    }
-    do_stuff(data);
-  }
-}
-
-static void BM_test_ranged_for(benchmark::State & state) {
-  auto resource = GetResource();
-  if (!resource.good()) {
-    state.SkipWithError("Resource is not good!");
-    return; // Early return is allowed when SkipWithError() has been used.
-  }
-  for (auto _ : state) {
-    auto data = resource.read_data();
-    if (!resource.good()) {
-      state.SkipWithError("Failed to read data!");
-      break; // REQUIRED to prevent all further iterations.
-    }
-    do_stuff(data);
-  }
-}
-```
-
-### A Faster KeepRunning Loop
-
-In C++11 mode, a range-based for loop should be used in preference to
-the `KeepRunning` loop for running the benchmarks. For example:
-
-```c++
-static void BM_Fast(benchmark::State &state) {
-  for (auto _ : state) {
-    FastOperation();
-  }
-}
-BENCHMARK(BM_Fast);
-```
-
-The reason the range-based for loop is faster than using `KeepRunning` is
-that `KeepRunning` requires a memory load and store of the iteration count
-every iteration, whereas the range-based variant is able to keep the iteration
-count in a register.
-
-For example, an empty inner loop using the range-based for method looks like:
-
-```asm
-# Loop Init
-  mov rbx, qword ptr [r14 + 104]
-  call benchmark::State::StartKeepRunning()
-  test rbx, rbx
-  je .LoopEnd
-.LoopHeader: # =>This Inner Loop Header: Depth=1
-  add rbx, -1
-  jne .LoopHeader
-.LoopEnd:
-```
-
-Compared to an empty `KeepRunning` loop, which looks like:
-
-```asm
-.LoopHeader: # in Loop: Header=BB0_3 Depth=1
-  cmp byte ptr [rbx], 1
-  jne .LoopInit
-.LoopBody: # =>This Inner Loop Header: Depth=1
-  mov rax, qword ptr [rbx + 8]
-  lea rcx, [rax + 1]
-  mov qword ptr [rbx + 8], rcx
-  cmp rax, qword ptr [rbx + 104]
-  jb .LoopHeader
-  jmp .LoopEnd
-.LoopInit:
-  mov rdi, rbx
-  call benchmark::State::StartKeepRunning()
-  jmp .LoopBody
-.LoopEnd:
-```
-
-Unless C++03 compatibility is required, the range-based variant of writing
-the benchmark loop should be preferred.
-
-### Disabling CPU Frequency Scaling
-
-If you see this warning:
-
-```
-***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will incur extra overhead.
-``` - -you might want to disable the CPU frequency scaling while running the benchmark: - -```bash -sudo cpupower frequency-set --governor performance -./mybench -sudo cpupower frequency-set --governor powersave -``` diff --git a/benchmarks/thirdparty/benchmark/WORKSPACE b/benchmarks/thirdparty/benchmark/WORKSPACE deleted file mode 100755 index c00d12cd17..0000000000 --- a/benchmarks/thirdparty/benchmark/WORKSPACE +++ /dev/null @@ -1,36 +0,0 @@ -workspace(name = "com_github_google_benchmark") - -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -http_archive( - name = "rules_cc", - strip_prefix = "rules_cc-a508235df92e71d537fcbae0c7c952ea6957a912", - urls = ["https://github.com/bazelbuild/rules_cc/archive/a508235df92e71d537fcbae0c7c952ea6957a912.zip"], -) - -http_archive( - name = "com_google_absl", - sha256 = "f41868f7a938605c92936230081175d1eae87f6ea2c248f41077c8f88316f111", - strip_prefix = "abseil-cpp-20200225.2", - urls = ["https://github.com/abseil/abseil-cpp/archive/20200225.2.tar.gz"], -) - -http_archive( - name = "com_google_googletest", - strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e", - urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"], -) - -http_archive( - name = "pybind11", - build_file = "@//bindings/python:pybind11.BUILD", - sha256 = "1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d", - strip_prefix = "pybind11-2.4.3", - urls = ["https://github.com/pybind/pybind11/archive/v2.4.3.tar.gz"], -) - -new_local_repository( - name = "python_headers", - build_file = "@//bindings/python:python_headers.BUILD", - path = "/usr/include/python3.6", # May be overwritten by setup.py. -) diff --git a/benchmarks/thirdparty/benchmark/_config.yml b/benchmarks/thirdparty/benchmark/_config.yml deleted file mode 100755 index 18854876c6..0000000000 --- a/benchmarks/thirdparty/benchmark/_config.yml +++ /dev/null @@ -1 +0,0 @@ -theme: jekyll-theme-midnight \ No newline at end of file diff --git a/benchmarks/thirdparty/benchmark/appveyor.yml b/benchmarks/thirdparty/benchmark/appveyor.yml deleted file mode 100755 index 81da955f02..0000000000 --- a/benchmarks/thirdparty/benchmark/appveyor.yml +++ /dev/null @@ -1,50 +0,0 @@ -version: '{build}' - -image: Visual Studio 2017 - -configuration: - - Debug - - Release - -environment: - matrix: - - compiler: msvc-15-seh - generator: "Visual Studio 15 2017" - - - compiler: msvc-15-seh - generator: "Visual Studio 15 2017 Win64" - - - compiler: msvc-14-seh - generator: "Visual Studio 14 2015" - - - compiler: msvc-14-seh - generator: "Visual Studio 14 2015 Win64" - - - compiler: gcc-5.3.0-posix - generator: "MinGW Makefiles" - cxx_path: 'C:\mingw-w64\i686-5.3.0-posix-dwarf-rt_v4-rev0\mingw32\bin' - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 - -matrix: - fast_finish: true - -install: - # git bash conflicts with MinGW makefiles - - if "%generator%"=="MinGW Makefiles" (set "PATH=%PATH:C:\Program Files\Git\usr\bin;=%") - - if not "%cxx_path%"=="" (set "PATH=%PATH%;%cxx_path%") - -build_script: - - md _build -Force - - cd _build - - echo %configuration% - - cmake -G "%generator%" "-DCMAKE_BUILD_TYPE=%configuration%" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON .. - - cmake --build . 
--config %configuration% - -test_script: - - ctest --build-config %configuration% --timeout 300 --output-on-failure - -artifacts: - - path: '_build/CMakeFiles/*.log' - name: logs - - path: '_build/Testing/**/*.xml' - name: test_results diff --git a/benchmarks/thirdparty/benchmark/bindings/python/build_defs.bzl b/benchmarks/thirdparty/benchmark/bindings/python/build_defs.bzl deleted file mode 100755 index 45907aaa5e..0000000000 --- a/benchmarks/thirdparty/benchmark/bindings/python/build_defs.bzl +++ /dev/null @@ -1,25 +0,0 @@ -_SHARED_LIB_SUFFIX = { - "//conditions:default": ".so", - "//:windows": ".dll", -} - -def py_extension(name, srcs, hdrs = [], copts = [], features = [], deps = []): - for shared_lib_suffix in _SHARED_LIB_SUFFIX.values(): - shared_lib_name = name + shared_lib_suffix - native.cc_binary( - name = shared_lib_name, - linkshared = 1, - linkstatic = 1, - srcs = srcs + hdrs, - copts = copts, - features = features, - deps = deps, - ) - - return native.py_library( - name = name, - data = select({ - platform: [name + shared_lib_suffix] - for platform, shared_lib_suffix in _SHARED_LIB_SUFFIX.items() - }), - ) diff --git a/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/__init__.py b/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/__init__.py deleted file mode 100755 index 787c423d5d..0000000000 --- a/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/__init__.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright 2020 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Python benchmarking utilities. - -Example usage: - import google_benchmark as benchmark - - @benchmark.register - def my_benchmark(state): - ... # Code executed outside `while` loop is not timed. - - while state: - ... # Code executed within `while` loop is timed. - - if __name__ == '__main__': - benchmark.main() -""" - -from absl import app -from google_benchmark import _benchmark -from google_benchmark._benchmark import ( - Counter, - kNanosecond, - kMicrosecond, - kMillisecond, - oNone, - o1, - oN, - oNSquared, - oNCubed, - oLogN, - oNLogN, - oAuto, - oLambda, -) - - -__all__ = [ - "register", - "main", - "Counter", - "kNanosecond", - "kMicrosecond", - "kMillisecond", - "oNone", - "o1", - "oN", - "oNSquared", - "oNCubed", - "oLogN", - "oNLogN", - "oAuto", - "oLambda", -] - -__version__ = "0.2.0" - - -class __OptionMaker: - """A stateless class to collect benchmark options. - - Collect all decorator calls like @option.range(start=0, limit=1<<5). 
- """ - - class Options: - """Pure data class to store options calls, along with the benchmarked function.""" - - def __init__(self, func): - self.func = func - self.builder_calls = [] - - @classmethod - def make(cls, func_or_options): - """Make Options from Options or the benchmarked function.""" - if isinstance(func_or_options, cls.Options): - return func_or_options - return cls.Options(func_or_options) - - def __getattr__(self, builder_name): - """Append option call in the Options.""" - - # The function that get returned on @option.range(start=0, limit=1<<5). - def __builder_method(*args, **kwargs): - - # The decorator that get called, either with the benchmared function - # or the previous Options - def __decorator(func_or_options): - options = self.make(func_or_options) - options.builder_calls.append((builder_name, args, kwargs)) - # The decorator returns Options so it is not technically a decorator - # and needs a final call to @regiser - return options - - return __decorator - - return __builder_method - - -# Alias for nicer API. -# We have to instanciate an object, even if stateless, to be able to use __getattr__ -# on option.range -option = __OptionMaker() - - -def register(undefined=None, *, name=None): - """Register function for benchmarking.""" - if undefined is None: - # Decorator is called without parenthesis so we return a decorator - return lambda f: register(f, name=name) - - # We have either the function to benchmark (simple case) or an instance of Options - # (@option._ case). - options = __OptionMaker.make(undefined) - - if name is None: - name = options.func.__name__ - - # We register the benchmark and reproduce all the @option._ calls onto the - # benchmark builder pattern - benchmark = _benchmark.RegisterBenchmark(name, options.func) - for name, args, kwargs in options.builder_calls[::-1]: - getattr(benchmark, name)(*args, **kwargs) - - # return the benchmarked function because the decorator does not modify it - return options.func - - -def _flags_parser(argv): - argv = _benchmark.Initialize(argv) - return app.parse_flags_with_usage(argv) - - -def _run_benchmarks(argv): - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") - return _benchmark.RunSpecifiedBenchmarks() - - -def main(argv=None): - return app.run(_run_benchmarks, argv=argv, flags_parser=_flags_parser) - - -# Methods for use with custom main function. -initialize = _benchmark.Initialize -run_benchmarks = _benchmark.RunSpecifiedBenchmarks diff --git a/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/benchmark.cc b/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/benchmark.cc deleted file mode 100755 index a733339769..0000000000 --- a/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/benchmark.cc +++ /dev/null @@ -1,180 +0,0 @@ -// Benchmark for Python. - -#include -#include -#include - -#include "pybind11/operators.h" -#include "pybind11/pybind11.h" -#include "pybind11/stl.h" -#include "pybind11/stl_bind.h" - -#include "benchmark/benchmark.h" - -PYBIND11_MAKE_OPAQUE(benchmark::UserCounters); - -namespace { -namespace py = ::pybind11; - -std::vector Initialize(const std::vector& argv) { - // The `argv` pointers here become invalid when this function returns, but - // benchmark holds the pointer to `argv[0]`. We create a static copy of it - // so it persists, and replace the pointer below. 
- static std::string executable_name(argv[0]); - std::vector ptrs; - ptrs.reserve(argv.size()); - for (auto& arg : argv) { - ptrs.push_back(const_cast(arg.c_str())); - } - ptrs[0] = const_cast(executable_name.c_str()); - int argc = static_cast(argv.size()); - benchmark::Initialize(&argc, ptrs.data()); - std::vector remaining_argv; - remaining_argv.reserve(argc); - for (int i = 0; i < argc; ++i) { - remaining_argv.emplace_back(ptrs[i]); - } - return remaining_argv; -} - -benchmark::internal::Benchmark* RegisterBenchmark(const char* name, - py::function f) { - return benchmark::RegisterBenchmark( - name, [f](benchmark::State& state) { f(&state); }); -} - -PYBIND11_MODULE(_benchmark, m) { - using benchmark::TimeUnit; - py::enum_(m, "TimeUnit") - .value("kNanosecond", TimeUnit::kNanosecond) - .value("kMicrosecond", TimeUnit::kMicrosecond) - .value("kMillisecond", TimeUnit::kMillisecond) - .export_values(); - - using benchmark::BigO; - py::enum_(m, "BigO") - .value("oNone", BigO::oNone) - .value("o1", BigO::o1) - .value("oN", BigO::oN) - .value("oNSquared", BigO::oNSquared) - .value("oNCubed", BigO::oNCubed) - .value("oLogN", BigO::oLogN) - .value("oNLogN", BigO::oLogN) - .value("oAuto", BigO::oAuto) - .value("oLambda", BigO::oLambda) - .export_values(); - - using benchmark::internal::Benchmark; - py::class_(m, "Benchmark") - // For methods returning a pointer tor the current object, reference - // return policy is used to ask pybind not to take ownership oof the - // returned object and avoid calling delete on it. - // https://pybind11.readthedocs.io/en/stable/advanced/functions.html#return-value-policies - // - // For methods taking a const std::vector<...>&, a copy is created - // because a it is bound to a Python list. - // https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html - .def("unit", &Benchmark::Unit, py::return_value_policy::reference) - .def("arg", &Benchmark::Arg, py::return_value_policy::reference) - .def("args", &Benchmark::Args, py::return_value_policy::reference) - .def("range", &Benchmark::Range, py::return_value_policy::reference, - py::arg("start"), py::arg("limit")) - .def("dense_range", &Benchmark::DenseRange, - py::return_value_policy::reference, py::arg("start"), - py::arg("limit"), py::arg("step") = 1) - .def("ranges", &Benchmark::Ranges, py::return_value_policy::reference) - .def("args_product", &Benchmark::ArgsProduct, - py::return_value_policy::reference) - .def("arg_name", &Benchmark::ArgName, py::return_value_policy::reference) - .def("arg_names", &Benchmark::ArgNames, - py::return_value_policy::reference) - .def("range_pair", &Benchmark::RangePair, - py::return_value_policy::reference, py::arg("lo1"), py::arg("hi1"), - py::arg("lo2"), py::arg("hi2")) - .def("range_multiplier", &Benchmark::RangeMultiplier, - py::return_value_policy::reference) - .def("min_time", &Benchmark::MinTime, py::return_value_policy::reference) - .def("iterations", &Benchmark::Iterations, - py::return_value_policy::reference) - .def("repetitions", &Benchmark::Repetitions, - py::return_value_policy::reference) - .def("report_aggregates_only", &Benchmark::ReportAggregatesOnly, - py::return_value_policy::reference, py::arg("value") = true) - .def("display_aggregates_only", &Benchmark::DisplayAggregatesOnly, - py::return_value_policy::reference, py::arg("value") = true) - .def("measure_process_cpu_time", &Benchmark::MeasureProcessCPUTime, - py::return_value_policy::reference) - .def("use_real_time", &Benchmark::UseRealTime, - py::return_value_policy::reference) - 
.def("use_manual_time", &Benchmark::UseManualTime, - py::return_value_policy::reference) - .def( - "complexity", - (Benchmark * (Benchmark::*)(benchmark::BigO)) & Benchmark::Complexity, - py::return_value_policy::reference, - py::arg("complexity") = benchmark::oAuto); - - using benchmark::Counter; - py::class_ py_counter(m, "Counter"); - - py::enum_(py_counter, "Flags") - .value("kDefaults", Counter::Flags::kDefaults) - .value("kIsRate", Counter::Flags::kIsRate) - .value("kAvgThreads", Counter::Flags::kAvgThreads) - .value("kAvgThreadsRate", Counter::Flags::kAvgThreadsRate) - .value("kIsIterationInvariant", Counter::Flags::kIsIterationInvariant) - .value("kIsIterationInvariantRate", - Counter::Flags::kIsIterationInvariantRate) - .value("kAvgIterations", Counter::Flags::kAvgIterations) - .value("kAvgIterationsRate", Counter::Flags::kAvgIterationsRate) - .value("kInvert", Counter::Flags::kInvert) - .export_values() - .def(py::self | py::self); - - py::enum_(py_counter, "OneK") - .value("kIs1000", Counter::OneK::kIs1000) - .value("kIs1024", Counter::OneK::kIs1024) - .export_values(); - - py_counter - .def(py::init(), - py::arg("value") = 0., py::arg("flags") = Counter::kDefaults, - py::arg("k") = Counter::kIs1000) - .def(py::init([](double value) { return Counter(value); })) - .def_readwrite("value", &Counter::value) - .def_readwrite("flags", &Counter::flags) - .def_readwrite("oneK", &Counter::oneK); - py::implicitly_convertible(); - py::implicitly_convertible(); - - py::bind_map(m, "UserCounters"); - - using benchmark::State; - py::class_(m, "State") - .def("__bool__", &State::KeepRunning) - .def_property_readonly("keep_running", &State::KeepRunning) - .def("pause_timing", &State::PauseTiming) - .def("resume_timing", &State::ResumeTiming) - .def("skip_with_error", &State::SkipWithError) - .def_property_readonly("error_occured", &State::error_occurred) - .def("set_iteration_time", &State::SetIterationTime) - .def_property("bytes_processed", &State::bytes_processed, - &State::SetBytesProcessed) - .def_property("complexity_n", &State::complexity_length_n, - &State::SetComplexityN) - .def_property("items_processed", &State::items_processed, - &State::SetItemsProcessed) - .def("set_label", (void (State::*)(const char*)) & State::SetLabel) - .def("range", &State::range, py::arg("pos") = 0) - .def_property_readonly("iterations", &State::iterations) - .def_readwrite("counters", &State::counters) - .def_readonly("thread_index", &State::thread_index) - .def_readonly("threads", &State::threads); - - m.def("Initialize", Initialize); - m.def("RegisterBenchmark", RegisterBenchmark, - py::return_value_policy::reference); - m.def("RunSpecifiedBenchmarks", - []() { benchmark::RunSpecifiedBenchmarks(); }); -}; -} // namespace diff --git a/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/example.py b/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/example.py deleted file mode 100755 index 9134e8cffe..0000000000 --- a/benchmarks/thirdparty/benchmark/bindings/python/google_benchmark/example.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright 2020 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Example of Python using C++ benchmark framework. - -To run this example, you must first install the `google_benchmark` Python package. - -To install using `setup.py`, download and extract the `google_benchmark` source. -In the extracted directory, execute: - python setup.py install -""" - -import random -import time - -import google_benchmark as benchmark -from google_benchmark import Counter - - -@benchmark.register -def empty(state): - while state: - pass - - -@benchmark.register -def sum_million(state): - while state: - sum(range(1_000_000)) - -@benchmark.register -def pause_timing(state): - """Pause timing every iteration.""" - while state: - # Construct a list of random ints every iteration without timing it - state.pause_timing() - random_list = [random.randint(0, 100) for _ in range(100)] - state.resume_timing() - # Time the in place sorting algorithm - random_list.sort() - - -@benchmark.register -def skipped(state): - if True: # Test some predicate here. - state.skip_with_error("some error") - return # NOTE: You must explicitly return, or benchmark will continue. - - ... # Benchmark code would be here. - - -@benchmark.register -def manual_timing(state): - while state: - # Manually count Python CPU time - start = time.perf_counter() # perf_counter_ns() in Python 3.7+ - # Something to benchmark - time.sleep(0.01) - end = time.perf_counter() - state.set_iteration_time(end - start) - - -@benchmark.register -def custom_counters(state): - """Collect cutom metric using benchmark.Counter.""" - num_foo = 0.0 - while state: - # Benchmark some code here - pass - # Collect some custom metric named foo - num_foo += 0.13 - - # Automatic Counter from numbers. - state.counters["foo"] = num_foo - # Set a counter as a rate. - state.counters["foo_rate"] = Counter(num_foo, Counter.kIsRate) - # Set a counter as an inverse of rate. - state.counters["foo_inv_rate"] = Counter(num_foo, Counter.kIsRate | Counter.kInvert) - # Set a counter as a thread-average quantity. 
- state.counters["foo_avg"] = Counter(num_foo, Counter.kAvgThreads) - # There's also a combined flag: - state.counters["foo_avg_rate"] = Counter(num_foo, Counter.kAvgThreadsRate) - - -@benchmark.register -@benchmark.option.measure_process_cpu_time() -@benchmark.option.use_real_time() -def with_options(state): - while state: - sum(range(1_000_000)) - - -@benchmark.register(name="sum_million_microseconds") -@benchmark.option.unit(benchmark.kMicrosecond) -def with_options(state): - while state: - sum(range(1_000_000)) - - -@benchmark.register -@benchmark.option.arg(100) -@benchmark.option.arg(1000) -def passing_argument(state): - while state: - sum(range(state.range(0))) - - -@benchmark.register -@benchmark.option.range(8, limit=8 << 10) -def using_range(state): - while state: - sum(range(state.range(0))) - - -@benchmark.register -@benchmark.option.range_multiplier(2) -@benchmark.option.range(1 << 10, 1 << 18) -@benchmark.option.complexity(benchmark.oN) -def computing_complexity(state): - while state: - sum(range(state.range(0))) - state.complexity_n = state.range(0) - - -if __name__ == "__main__": - benchmark.main() diff --git a/benchmarks/thirdparty/benchmark/bindings/python/pybind11.BUILD b/benchmarks/thirdparty/benchmark/bindings/python/pybind11.BUILD deleted file mode 100755 index bc83350038..0000000000 --- a/benchmarks/thirdparty/benchmark/bindings/python/pybind11.BUILD +++ /dev/null @@ -1,20 +0,0 @@ -cc_library( - name = "pybind11", - hdrs = glob( - include = [ - "include/pybind11/*.h", - "include/pybind11/detail/*.h", - ], - exclude = [ - "include/pybind11/common.h", - "include/pybind11/eigen.h", - ], - ), - copts = [ - "-fexceptions", - "-Wno-undefined-inline", - "-Wno-pragma-once-outside-header", - ], - includes = ["include"], - visibility = ["//visibility:public"], -) diff --git a/benchmarks/thirdparty/benchmark/bindings/python/python_headers.BUILD b/benchmarks/thirdparty/benchmark/bindings/python/python_headers.BUILD deleted file mode 100755 index 9c34cf6ca4..0000000000 --- a/benchmarks/thirdparty/benchmark/bindings/python/python_headers.BUILD +++ /dev/null @@ -1,6 +0,0 @@ -cc_library( - name = "python_headers", - hdrs = glob(["**/*.h"]), - includes = ["."], - visibility = ["//visibility:public"], -) diff --git a/benchmarks/thirdparty/benchmark/bindings/python/requirements.txt b/benchmarks/thirdparty/benchmark/bindings/python/requirements.txt deleted file mode 100755 index f5bbe7eca5..0000000000 --- a/benchmarks/thirdparty/benchmark/bindings/python/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -absl-py>=0.7.1 - diff --git a/benchmarks/thirdparty/benchmark/cmake/AddCXXCompilerFlag.cmake b/benchmarks/thirdparty/benchmark/cmake/AddCXXCompilerFlag.cmake deleted file mode 100755 index d0d2099814..0000000000 --- a/benchmarks/thirdparty/benchmark/cmake/AddCXXCompilerFlag.cmake +++ /dev/null @@ -1,74 +0,0 @@ -# - Adds a compiler flag if it is supported by the compiler -# -# This function checks that the supplied compiler flag is supported and then -# adds it to the corresponding compiler flags -# -# add_cxx_compiler_flag( []) -# -# - Example -# -# include(AddCXXCompilerFlag) -# add_cxx_compiler_flag(-Wall) -# add_cxx_compiler_flag(-no-strict-aliasing RELEASE) -# Requires CMake 2.6+ - -if(__add_cxx_compiler_flag) - return() -endif() -set(__add_cxx_compiler_flag INCLUDED) - -include(CheckCXXCompilerFlag) - -function(mangle_compiler_flag FLAG OUTPUT) - string(TOUPPER "HAVE_CXX_FLAG_${FLAG}" SANITIZED_FLAG) - string(REPLACE "+" "X" SANITIZED_FLAG ${SANITIZED_FLAG}) - string(REGEX REPLACE 
"[^A-Za-z_0-9]" "_" SANITIZED_FLAG ${SANITIZED_FLAG}) - string(REGEX REPLACE "_+" "_" SANITIZED_FLAG ${SANITIZED_FLAG}) - set(${OUTPUT} "${SANITIZED_FLAG}" PARENT_SCOPE) -endfunction(mangle_compiler_flag) - -function(add_cxx_compiler_flag FLAG) - mangle_compiler_flag("${FLAG}" MANGLED_FLAG) - set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") - set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}") - check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) - set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") - if(${MANGLED_FLAG}) - set(VARIANT ${ARGV1}) - if(ARGV1) - string(TOUPPER "_${VARIANT}" VARIANT) - endif() - set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${BENCHMARK_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) - endif() -endfunction() - -function(add_required_cxx_compiler_flag FLAG) - mangle_compiler_flag("${FLAG}" MANGLED_FLAG) - set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") - set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}") - check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) - set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") - if(${MANGLED_FLAG}) - set(VARIANT ${ARGV1}) - if(ARGV1) - string(TOUPPER "_${VARIANT}" VARIANT) - endif() - set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) - set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) - set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) - set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}" PARENT_SCOPE) - else() - message(FATAL_ERROR "Required flag '${FLAG}' is not supported by the compiler") - endif() -endfunction() - -function(check_cxx_warning_flag FLAG) - mangle_compiler_flag("${FLAG}" MANGLED_FLAG) - set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") - # Add -Werror to ensure the compiler generates an error if the warning flag - # doesn't exist. 
- set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -Werror ${FLAG}") - check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) - set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") -endfunction() diff --git a/benchmarks/thirdparty/benchmark/cmake/CXXFeatureCheck.cmake b/benchmarks/thirdparty/benchmark/cmake/CXXFeatureCheck.cmake deleted file mode 100755 index 62e6741fe3..0000000000 --- a/benchmarks/thirdparty/benchmark/cmake/CXXFeatureCheck.cmake +++ /dev/null @@ -1,69 +0,0 @@ -# - Compile and run code to check for C++ features -# -# This functions compiles a source file under the `cmake` folder -# and adds the corresponding `HAVE_[FILENAME]` flag to the CMake -# environment -# -# cxx_feature_check( []) -# -# - Example -# -# include(CXXFeatureCheck) -# cxx_feature_check(STD_REGEX) -# Requires CMake 2.8.12+ - -if(__cxx_feature_check) - return() -endif() -set(__cxx_feature_check INCLUDED) - -function(cxx_feature_check FILE) - string(TOLOWER ${FILE} FILE) - string(TOUPPER ${FILE} VAR) - string(TOUPPER "HAVE_${VAR}" FEATURE) - if (DEFINED HAVE_${VAR}) - set(HAVE_${VAR} 1 PARENT_SCOPE) - add_definitions(-DHAVE_${VAR}) - return() - endif() - - if (ARGC GREATER 1) - message(STATUS "Enabling additional flags: ${ARGV1}") - list(APPEND BENCHMARK_CXX_LINKER_FLAGS ${ARGV1}) - endif() - - if (NOT DEFINED COMPILE_${FEATURE}) - message(STATUS "Performing Test ${FEATURE}") - if(CMAKE_CROSSCOMPILING) - try_compile(COMPILE_${FEATURE} - ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp - CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} - LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) - if(COMPILE_${FEATURE}) - message(WARNING - "If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0") - set(RUN_${FEATURE} 0 CACHE INTERNAL "") - else() - set(RUN_${FEATURE} 1 CACHE INTERNAL "") - endif() - else() - message(STATUS "Performing Test ${FEATURE}") - try_run(RUN_${FEATURE} COMPILE_${FEATURE} - ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp - CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} - LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) - endif() - endif() - - if(RUN_${FEATURE} EQUAL 0) - message(STATUS "Performing Test ${FEATURE} -- success") - set(HAVE_${VAR} 1 PARENT_SCOPE) - add_definitions(-DHAVE_${VAR}) - else() - if(NOT COMPILE_${FEATURE}) - message(STATUS "Performing Test ${FEATURE} -- failed to compile") - else() - message(STATUS "Performing Test ${FEATURE} -- compiled but failed to run") - endif() - endif() -endfunction() diff --git a/benchmarks/thirdparty/benchmark/cmake/Config.cmake.in b/benchmarks/thirdparty/benchmark/cmake/Config.cmake.in deleted file mode 100755 index 6e9256eea8..0000000000 --- a/benchmarks/thirdparty/benchmark/cmake/Config.cmake.in +++ /dev/null @@ -1 +0,0 @@ -include("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake") diff --git a/benchmarks/thirdparty/benchmark/cmake/GetGitVersion.cmake b/benchmarks/thirdparty/benchmark/cmake/GetGitVersion.cmake deleted file mode 100755 index 4f10f226d7..0000000000 --- a/benchmarks/thirdparty/benchmark/cmake/GetGitVersion.cmake +++ /dev/null @@ -1,54 +0,0 @@ -# - Returns a version string from Git tags -# -# This function inspects the annotated git tags for the project and returns a string -# into a CMake variable -# -# get_git_version() -# -# - Example -# -# include(GetGitVersion) -# get_git_version(GIT_VERSION) -# -# Requires CMake 2.8.11+ -find_package(Git) - -if(__get_git_version) - return() -endif() -set(__get_git_version INCLUDED) - -function(get_git_version var) - if(GIT_EXECUTABLE) - 
execute_process(COMMAND ${GIT_EXECUTABLE} describe --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8 - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} - RESULT_VARIABLE status - OUTPUT_VARIABLE GIT_VERSION - ERROR_QUIET) - if(${status}) - set(GIT_VERSION "v0.0.0") - else() - string(STRIP ${GIT_VERSION} GIT_VERSION) - string(REGEX REPLACE "-[0-9]+-g" "-" GIT_VERSION ${GIT_VERSION}) - endif() - - # Work out if the repository is dirty - execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} - OUTPUT_QUIET - ERROR_QUIET) - execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD -- - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} - OUTPUT_VARIABLE GIT_DIFF_INDEX - ERROR_QUIET) - string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY) - if (${GIT_DIRTY}) - set(GIT_VERSION "${GIT_VERSION}-dirty") - endif() - else() - set(GIT_VERSION "v0.0.0") - endif() - - message(STATUS "git Version: ${GIT_VERSION}") - set(${var} ${GIT_VERSION} PARENT_SCOPE) -endfunction() diff --git a/benchmarks/thirdparty/benchmark/cmake/GoogleTest.cmake b/benchmarks/thirdparty/benchmark/cmake/GoogleTest.cmake deleted file mode 100755 index dd611fc875..0000000000 --- a/benchmarks/thirdparty/benchmark/cmake/GoogleTest.cmake +++ /dev/null @@ -1,41 +0,0 @@ -# Download and unpack googletest at configure time -set(GOOGLETEST_PREFIX "${benchmark_BINARY_DIR}/third_party/googletest") -configure_file(${benchmark_SOURCE_DIR}/cmake/GoogleTest.cmake.in ${GOOGLETEST_PREFIX}/CMakeLists.txt @ONLY) - -set(GOOGLETEST_PATH "${CMAKE_CURRENT_SOURCE_DIR}/googletest" CACHE PATH "") # Mind the quotes -execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" - -DALLOW_DOWNLOADING_GOOGLETEST=${BENCHMARK_DOWNLOAD_DEPENDENCIES} -DGOOGLETEST_PATH:PATH=${GOOGLETEST_PATH} . - RESULT_VARIABLE result - WORKING_DIRECTORY ${GOOGLETEST_PREFIX} -) - -if(result) - message(FATAL_ERROR "CMake step for googletest failed: ${result}") -endif() - -execute_process( - COMMAND ${CMAKE_COMMAND} --build . - RESULT_VARIABLE result - WORKING_DIRECTORY ${GOOGLETEST_PREFIX} -) - -if(result) - message(FATAL_ERROR "Build step for googletest failed: ${result}") -endif() - -# Prevent overriding the parent project's compiler/linker -# settings on Windows -set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) - -include(${GOOGLETEST_PREFIX}/googletest-paths.cmake) - -# Add googletest directly to our build. This defines -# the gtest and gtest_main targets. -add_subdirectory(${GOOGLETEST_SOURCE_DIR} - ${GOOGLETEST_BINARY_DIR} - EXCLUDE_FROM_ALL) - -set_target_properties(gtest PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $) -set_target_properties(gtest_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $) -set_target_properties(gmock PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $) -set_target_properties(gmock_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $) diff --git a/benchmarks/thirdparty/benchmark/cmake/GoogleTest.cmake.in b/benchmarks/thirdparty/benchmark/cmake/GoogleTest.cmake.in deleted file mode 100755 index fd957ff564..0000000000 --- a/benchmarks/thirdparty/benchmark/cmake/GoogleTest.cmake.in +++ /dev/null @@ -1,58 +0,0 @@ -cmake_minimum_required(VERSION 2.8.12) - -project(googletest-download NONE) - -# Enable ExternalProject CMake module -include(ExternalProject) - -option(ALLOW_DOWNLOADING_GOOGLETEST "If googletest src tree is not found in location specified by GOOGLETEST_PATH, do fetch the archive from internet" OFF) -set(GOOGLETEST_PATH "/usr/src/googletest" CACHE PATH - "Path to the googletest root tree. 
Should contain googletest and googlemock subdirs. And CMakeLists.txt in root, and in both of these subdirs")
-
-# Download and install GoogleTest
-
-message(STATUS "Looking for Google Test sources")
-message(STATUS "Looking for Google Test sources in ${GOOGLETEST_PATH}")
-if(EXISTS "${GOOGLETEST_PATH}" AND IS_DIRECTORY "${GOOGLETEST_PATH}" AND EXISTS "${GOOGLETEST_PATH}/CMakeLists.txt" AND
-   EXISTS "${GOOGLETEST_PATH}/googletest" AND IS_DIRECTORY "${GOOGLETEST_PATH}/googletest" AND EXISTS "${GOOGLETEST_PATH}/googletest/CMakeLists.txt" AND
-   EXISTS "${GOOGLETEST_PATH}/googlemock" AND IS_DIRECTORY "${GOOGLETEST_PATH}/googlemock" AND EXISTS "${GOOGLETEST_PATH}/googlemock/CMakeLists.txt")
-  message(STATUS "Found Google Test in ${GOOGLETEST_PATH}")
-
-  ExternalProject_Add(
-    googletest
-    PREFIX "${CMAKE_BINARY_DIR}"
-    DOWNLOAD_DIR "${CMAKE_BINARY_DIR}/download"
-    SOURCE_DIR "${GOOGLETEST_PATH}" # use existing src dir.
-    BINARY_DIR "${CMAKE_BINARY_DIR}/build"
-    CONFIGURE_COMMAND ""
-    BUILD_COMMAND ""
-    INSTALL_COMMAND ""
-    TEST_COMMAND ""
-  )
-else()
-  if(NOT ALLOW_DOWNLOADING_GOOGLETEST)
-    message(SEND_ERROR "Did not find Google Test sources! Either pass correct path in GOOGLETEST_PATH, or enable BENCHMARK_DOWNLOAD_DEPENDENCIES, or disable BENCHMARK_ENABLE_GTEST_TESTS / BENCHMARK_ENABLE_TESTING.")
-  else()
-    message(WARNING "Did not find Google Test sources! Fetching from web...")
-    ExternalProject_Add(
-      googletest
-      GIT_REPOSITORY https://github.com/google/googletest.git
-      GIT_TAG master
-      PREFIX "${CMAKE_BINARY_DIR}"
-      STAMP_DIR "${CMAKE_BINARY_DIR}/stamp"
-      DOWNLOAD_DIR "${CMAKE_BINARY_DIR}/download"
-      SOURCE_DIR "${CMAKE_BINARY_DIR}/src"
-      BINARY_DIR "${CMAKE_BINARY_DIR}/build"
-      CONFIGURE_COMMAND ""
-      BUILD_COMMAND ""
-      INSTALL_COMMAND ""
-      TEST_COMMAND ""
-    )
-  endif()
-endif()
-
-ExternalProject_Get_Property(googletest SOURCE_DIR BINARY_DIR)
-file(WRITE googletest-paths.cmake
-"set(GOOGLETEST_SOURCE_DIR \"${SOURCE_DIR}\")
-set(GOOGLETEST_BINARY_DIR \"${BINARY_DIR}\")
-")
diff --git a/benchmarks/thirdparty/benchmark/cmake/benchmark.pc.in b/benchmarks/thirdparty/benchmark/cmake/benchmark.pc.in
deleted file mode 100755
index 43ca8f91d7..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/benchmark.pc.in
+++ /dev/null
@@ -1,12 +0,0 @@
-prefix=@CMAKE_INSTALL_PREFIX@
-exec_prefix=${prefix}
-libdir=${prefix}/lib
-includedir=${prefix}/include
-
-Name: @PROJECT_NAME@
-Description: Google microbenchmark framework
-Version: @VERSION@
-
-Libs: -L${libdir} -lbenchmark
-Libs.private: -lpthread
-Cflags: -I${includedir}
diff --git a/benchmarks/thirdparty/benchmark/cmake/gnu_posix_regex.cpp b/benchmarks/thirdparty/benchmark/cmake/gnu_posix_regex.cpp
deleted file mode 100755
index b5b91cdab7..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/gnu_posix_regex.cpp
+++ /dev/null
@@ -1,12 +0,0 @@
-#include <gnu-versions.h>
-#include <string>
-#include <regex.h>
-int main() {
-  std::string str = "test0159";
-  regex_t re;
-  int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
-  if (ec != 0) {
-    return ec;
-  }
-  return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
-}
-
diff --git a/benchmarks/thirdparty/benchmark/cmake/llvm-toolchain.cmake b/benchmarks/thirdparty/benchmark/cmake/llvm-toolchain.cmake
deleted file mode 100755
index fc119e52fd..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/llvm-toolchain.cmake
+++ /dev/null
@@ -1,8 +0,0 @@
-find_package(LLVMAr REQUIRED)
-set(CMAKE_AR "${LLVMAR_EXECUTABLE}" CACHE FILEPATH "" FORCE)
-
-find_package(LLVMNm REQUIRED)
-set(CMAKE_NM "${LLVMNM_EXECUTABLE}" CACHE FILEPATH "" FORCE)
-
-find_package(LLVMRanLib REQUIRED)
-set(CMAKE_RANLIB "${LLVMRANLIB_EXECUTABLE}" CACHE FILEPATH "" FORCE)
diff --git a/benchmarks/thirdparty/benchmark/cmake/posix_regex.cpp b/benchmarks/thirdparty/benchmark/cmake/posix_regex.cpp
deleted file mode 100755
index 466dc62560..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/posix_regex.cpp
+++ /dev/null
@@ -1,14 +0,0 @@
-#include <string>
-#include <regex.h>
-int main() {
-  std::string str = "test0159";
-  regex_t re;
-  int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
-  if (ec != 0) {
-    return ec;
-  }
-  int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
-  regfree(&re);
-  return ret;
-}
-
diff --git a/benchmarks/thirdparty/benchmark/cmake/split_list.cmake b/benchmarks/thirdparty/benchmark/cmake/split_list.cmake
deleted file mode 100755
index 67aed3fdc8..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/split_list.cmake
+++ /dev/null
@@ -1,3 +0,0 @@
-macro(split_list listname)
-  string(REPLACE ";" " " ${listname} "${${listname}}")
-endmacro()
diff --git a/benchmarks/thirdparty/benchmark/cmake/std_regex.cpp b/benchmarks/thirdparty/benchmark/cmake/std_regex.cpp
deleted file mode 100755
index 696f2a26bc..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/std_regex.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-#include <regex>
-#include <string>
-int main() {
-  const std::string str = "test0159";
-  std::regex re;
-  re = std::regex("^[a-z]+[0-9]+$",
-                  std::regex_constants::extended | std::regex_constants::nosubs);
-  return std::regex_search(str, re) ? 0 : -1;
-}
-
diff --git a/benchmarks/thirdparty/benchmark/cmake/steady_clock.cpp b/benchmarks/thirdparty/benchmark/cmake/steady_clock.cpp
deleted file mode 100755
index 66d50d17e9..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/steady_clock.cpp
+++ /dev/null
@@ -1,7 +0,0 @@
-#include <chrono>
-
-int main() {
-    typedef std::chrono::steady_clock Clock;
-    Clock::time_point tp = Clock::now();
-    ((void)tp);
-}
diff --git a/benchmarks/thirdparty/benchmark/cmake/thread_safety_attributes.cpp b/benchmarks/thirdparty/benchmark/cmake/thread_safety_attributes.cpp
deleted file mode 100755
index 46161babdb..0000000000
--- a/benchmarks/thirdparty/benchmark/cmake/thread_safety_attributes.cpp
+++ /dev/null
@@ -1,4 +0,0 @@
-#define HAVE_THREAD_SAFETY_ATTRIBUTES
-#include "../src/mutex.h"
-
-int main() {}
diff --git a/benchmarks/thirdparty/benchmark/conan/CMakeLists.txt b/benchmarks/thirdparty/benchmark/conan/CMakeLists.txt
deleted file mode 100755
index 15b92ca91a..0000000000
--- a/benchmarks/thirdparty/benchmark/conan/CMakeLists.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-cmake_minimum_required(VERSION 2.8.11)
-project(cmake_wrapper)
-
-include(conanbuildinfo.cmake)
-conan_basic_setup()
-
-include(${CMAKE_SOURCE_DIR}/CMakeListsOriginal.txt)
diff --git a/benchmarks/thirdparty/benchmark/conan/test_package/CMakeLists.txt b/benchmarks/thirdparty/benchmark/conan/test_package/CMakeLists.txt
deleted file mode 100755
index 089a6c729d..0000000000
--- a/benchmarks/thirdparty/benchmark/conan/test_package/CMakeLists.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-cmake_minimum_required(VERSION 2.8.11)
-project(test_package)
-
-set(CMAKE_VERBOSE_MAKEFILE TRUE)
-
-include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
-conan_basic_setup()
-
-add_executable(${PROJECT_NAME} test_package.cpp)
-target_link_libraries(${PROJECT_NAME} ${CONAN_LIBS})
diff --git a/benchmarks/thirdparty/benchmark/conan/test_package/conanfile.py b/benchmarks/thirdparty/benchmark/conan/test_package/conanfile.py
deleted file mode 100755
index d63f4088c9..0000000000
--- a/benchmarks/thirdparty/benchmark/conan/test_package/conanfile.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from conans import ConanFile, CMake
-import os
-
-
-class TestPackageConan(ConanFile):
-    settings = "os", "compiler", "build_type", "arch"
-    generators = "cmake"
-
-    def build(self):
-        cmake = CMake(self)
-        cmake.configure()
-        cmake.build()
-
-    def test(self):
-        bin_path = os.path.join("bin", "test_package")
-        self.run(bin_path, run_environment=True)
diff --git a/benchmarks/thirdparty/benchmark/conan/test_package/test_package.cpp b/benchmarks/thirdparty/benchmark/conan/test_package/test_package.cpp
deleted file mode 100755
index 4fa7ec0bf9..0000000000
--- a/benchmarks/thirdparty/benchmark/conan/test_package/test_package.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-#include "benchmark/benchmark.h"
-
-void BM_StringCreation(benchmark::State& state) {
-  while (state.KeepRunning())
-    std::string empty_string;
-}
-
-BENCHMARK(BM_StringCreation);
-
-void BM_StringCopy(benchmark::State& state) {
-  std::string x = "hello";
-  while (state.KeepRunning())
-    std::string copy(x);
-}
-
-BENCHMARK(BM_StringCopy);
-
-BENCHMARK_MAIN();
diff --git a/benchmarks/thirdparty/benchmark/conanfile.py b/benchmarks/thirdparty/benchmark/conanfile.py
deleted file mode 100755
index e31fc5268a..0000000000
--- a/benchmarks/thirdparty/benchmark/conanfile.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from conans import ConanFile, CMake, tools
-from conans.errors import ConanInvalidConfiguration
-import shutil
-import os
-
-
-class GoogleBenchmarkConan(ConanFile):
-    name = "benchmark"
-    description = "A microbenchmark support library."
-    topics = ("conan", "benchmark", "google", "microbenchmark")
-    url = "https://github.com/google/benchmark"
-    homepage = "https://github.com/google/benchmark"
-    author = "Google Inc."
-    license = "Apache-2.0"
-    exports_sources = ["*"]
-    generators = "cmake"
-
-    settings = "arch", "build_type", "compiler", "os"
-    options = {
-        "shared": [True, False],
-        "fPIC": [True, False],
-        "enable_lto": [True, False],
-        "enable_exceptions": [True, False]
-    }
-    default_options = {"shared": False, "fPIC": True, "enable_lto": False, "enable_exceptions": True}
-
-    _build_subfolder = "."
-
-    def source(self):
-        # Wrap the original CMake file to call conan_basic_setup
-        shutil.move("CMakeLists.txt", "CMakeListsOriginal.txt")
-        shutil.move(os.path.join("conan", "CMakeLists.txt"), "CMakeLists.txt")
-
-    def config_options(self):
-        if self.settings.os == "Windows":
-            if self.settings.compiler == "Visual Studio" and float(self.settings.compiler.version.value) <= 12:
-                raise ConanInvalidConfiguration("{} {} does not support Visual Studio <= 12".format(self.name, self.version))
-            del self.options.fPIC
-
-    def configure(self):
-        if self.settings.os == "Windows" and self.options.shared:
-            raise ConanInvalidConfiguration("Windows shared builds are not supported right now, see issue #639")
-
-    def _configure_cmake(self):
-        cmake = CMake(self)
-
-        cmake.definitions["BENCHMARK_ENABLE_TESTING"] = "OFF"
-        cmake.definitions["BENCHMARK_ENABLE_GTEST_TESTS"] = "OFF"
-        cmake.definitions["BENCHMARK_ENABLE_LTO"] = "ON" if self.options.enable_lto else "OFF"
-        cmake.definitions["BENCHMARK_ENABLE_EXCEPTIONS"] = "ON" if self.options.enable_exceptions else "OFF"
-
-        # See https://github.com/google/benchmark/pull/638 for Windows 32 build explanation
-        if self.settings.os != "Windows":
-            cmake.definitions["BENCHMARK_BUILD_32_BITS"] = "ON" if "64" not in str(self.settings.arch) else "OFF"
-            cmake.definitions["BENCHMARK_USE_LIBCXX"] = "ON" if (str(self.settings.compiler.libcxx) == "libc++") else "OFF"
-        else:
-            cmake.definitions["BENCHMARK_USE_LIBCXX"] = "OFF"
-
-        cmake.configure(build_folder=self._build_subfolder)
-        return cmake
-
-    def build(self):
-        cmake = self._configure_cmake()
-        cmake.build()
-
-    def package(self):
-        cmake = self._configure_cmake()
-        cmake.install()
-
-        self.copy(pattern="LICENSE", dst="licenses")
-
-    def package_info(self):
-        self.cpp_info.libs = tools.collect_libs(self)
-        if self.settings.os == "Linux":
-            self.cpp_info.libs.extend(["pthread", "rt"])
-        elif self.settings.os == "Windows":
-            self.cpp_info.libs.append("shlwapi")
-        elif self.settings.os == "SunOS":
-            self.cpp_info.libs.append("kstat")
diff --git a/benchmarks/thirdparty/benchmark/dependencies.md b/benchmarks/thirdparty/benchmark/dependencies.md
deleted file mode 100755
index 6289b4e354..0000000000
--- a/benchmarks/thirdparty/benchmark/dependencies.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Build tool dependency policy
-
-To ensure the broadest compatibility when building the benchmark library, but
-still allow forward progress, we require any build tooling to be available for:
-
-* Debian stable AND
-* The last two Ubuntu LTS releases AND
-
-Currently, this means using build tool versions that are available for Ubuntu
-16.04 (Xenial), Ubuntu 18.04 (Bionic), and Debian stretch.
-
-_Note, [travis](.travis.yml) runs under Ubuntu 14.04 (Trusty) for linux builds._
-
-## cmake
-The current supported version is cmake 3.5.1 as of 2018-06-06.
-
-_Note, this version is also available for Ubuntu 14.04, the previous Ubuntu LTS
-release, as `cmake3`._
diff --git a/benchmarks/thirdparty/benchmark/docs/AssemblyTests.md b/benchmarks/thirdparty/benchmark/docs/AssemblyTests.md
deleted file mode 100755
index 1fbdc269b5..0000000000
--- a/benchmarks/thirdparty/benchmark/docs/AssemblyTests.md
+++ /dev/null
@@ -1,147 +0,0 @@
-# Assembly Tests
-
-The Benchmark library provides a number of functions whose primary
-purpose is to affect assembly generation, including `DoNotOptimize`
-and `ClobberMemory`. In addition there are other functions,
-such as `KeepRunning`, for which generating good assembly is paramount.
-
-For these functions it's important to have tests that verify the
-correctness and quality of the implementation. This requires testing
-the code generated by the compiler.
-
-This document describes how the Benchmark library tests compiler output,
-as well as how to properly write new tests.
-
-
-## Anatomy of a Test
-
-Writing a test has two steps:
-
-* Write the code you want to generate assembly for.
-* Add `// CHECK` lines to match against the verified assembly.
-
-Example:
-```c++
-
-// CHECK-LABEL: test_add:
-extern "C" int test_add() {
-    extern int ExternInt;
-    return ExternInt + 1;
-
-    // CHECK: movl ExternInt(%rip), %eax
-    // CHECK: addl $1, %eax
-    // CHECK: ret
-}
-
-```
-
-#### LLVM Filecheck
-
-[LLVM's Filecheck](https://llvm.org/docs/CommandGuide/FileCheck.html)
-is used to test the generated assembly against the `// CHECK` lines
-specified in the test's source file. Please see the documentation
-linked above for information on how to write `CHECK` directives.
-
-#### Tips and Tricks:
-
-* Tests should match the minimal amount of output required to establish
-correctness. `CHECK` directives don't have to match on the exact next line
-after the previous match, so tests should omit checks for unimportant
-bits of assembly. ([`CHECK-NEXT`](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-next-directive)
-can be used to ensure a match occurs exactly after the previous match).
-
-* The tests are compiled with `-O3 -g0`. So we're only testing the
-optimized output.
-
-* The assembly output is further cleaned up using `tools/strip_asm.py`.
-This removes comments, assembler directives, and unused labels before
-the test is run.
-
-* The generated and stripped assembly file for a test is output under
-`<build-dir>/test/<test-name>.s`
-
-* Filecheck supports using [`CHECK` prefixes](https://llvm.org/docs/CommandGuide/FileCheck.html#cmdoption-check-prefixes)
-to specify lines that should only match in certain situations.
-The Benchmark tests use `CHECK-CLANG` and `CHECK-GNU` for lines that
-are only expected to match Clang or GCC's output respectively. Normal
-`CHECK` lines match against all compilers. (Note: `CHECK-NOT` and
-`CHECK-LABEL` are NOT prefixes. They are versions of non-prefixed
-`CHECK` lines)
-
-* Use `extern "C"` to disable name mangling for specific functions. This
-makes them easier to name in the `CHECK` lines.
-
-
-## Problems Writing Portable Tests
-
-Writing tests which check the code generated by a compiler is
-inherently non-portable. Different compilers and even different compiler
-versions may generate entirely different code. The Benchmark tests
-must tolerate this.
-
-LLVM Filecheck provides a number of mechanisms to help write
-"more portable" tests; including [matching using regular expressions](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-pattern-matching-syntax),
-allowing the creation of [named variables](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-variables)
-for later matching, and [checking non-sequential matches](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-dag-directive).
-
-#### Capturing Variables
-
-For example, say GCC stores a variable in a register but Clang stores
-it in memory. To write a test that tolerates both cases we "capture"
-the destination of the store, and then use the captured expression
-to write the remainder of the test.
-
-```c++
-// CHECK-LABEL: test_div_no_op_into_shr:
-extern "C" int test_div_no_op_into_shr(int value) {
-  int divisor = 2;
-  benchmark::DoNotOptimize(divisor); // hide the value from the optimizer
-  return value / divisor;
-
-  // CHECK: movl $2, [[DEST:.*]]
-  // CHECK: idivl [[DEST]]
-  // CHECK: ret
-}
-```
-
-#### Using Regular Expressions to Match Differing Output
-
-Often tests require testing assembly lines which may subtly differ
-between compilers or compiler versions. A common example of this
-is matching stack frame addresses. In this case regular expressions
-can be used to match the differing bits of output. For example:
-
-```c++
-int ExternInt;
-struct Point { int x, y, z; };
-
-// CHECK-LABEL: test_store_point:
-extern "C" void test_store_point() {
-  Point p{ExternInt, ExternInt, ExternInt};
-  benchmark::DoNotOptimize(p);
-
-  // CHECK: movl ExternInt(%rip), %eax
-  // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
-  // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
-  // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
-  // CHECK: ret
-}
-```
-
-## Current Requirements and Limitations
-
-The tests require Filecheck to be installed along the `PATH` of the
-build machine. Otherwise the tests will be disabled.
-
-Additionally, as mentioned in the previous section, codegen tests are
-inherently non-portable. Currently the tests are limited to:
-
-* x86_64 targets.
-* Compiled with GCC or Clang
-
-Further work could be done, at least on a limited basis, to extend the
-tests to other architectures and compilers (using `CHECK` prefixes).
-
-Furthermore, the tests fail for builds which specify additional flags
-that modify code generation, including `--coverage` or `-fsanitize=`.
-
diff --git a/benchmarks/thirdparty/benchmark/docs/_config.yml b/benchmarks/thirdparty/benchmark/docs/_config.yml
deleted file mode 100755
index 18854876c6..0000000000
--- a/benchmarks/thirdparty/benchmark/docs/_config.yml
+++ /dev/null
@@ -1 +0,0 @@
-theme: jekyll-theme-midnight
\ No newline at end of file
diff --git a/benchmarks/thirdparty/benchmark/docs/releasing.md b/benchmarks/thirdparty/benchmark/docs/releasing.md
deleted file mode 100755
index f0cd7010e3..0000000000
--- a/benchmarks/thirdparty/benchmark/docs/releasing.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# How to release
-
-* Make sure you're on master and synced to HEAD
-* Ensure the project builds and tests run (sanity check only, obviously)
-  * `parallel -j0 exec ::: test/*_test` can help ensure everything at least
-    passes
-* Prepare release notes
-  * `git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of
-    commits between the last annotated tag and HEAD
-  * Pick the most interesting.
-* Create a release through github's interface
-  * Note this will create a lightweight tag.
-  * Update this to an annotated tag:
-    * `git pull --tags`
-    * `git tag -a -f <tag> <tag>`
-    * `git push --force origin`
diff --git a/benchmarks/thirdparty/benchmark/docs/tools.md b/benchmarks/thirdparty/benchmark/docs/tools.md
deleted file mode 100755
index f2d0c497f3..0000000000
--- a/benchmarks/thirdparty/benchmark/docs/tools.md
+++ /dev/null
@@ -1,203 +0,0 @@
-# Benchmark Tools
-
-## compare.py
-
-The `compare.py` script can be used to compare the results of benchmarks.
-
-### Dependencies
-The utility relies on the [scipy](https://www.scipy.org) package which can be installed using pip:
-```bash
-pip3 install -r requirements.txt
-```
-
-### Displaying aggregates only
-
-The switch `-a` / `--display_aggregates_only` can be used to control the
-display of the normal iterations vs. the aggregates. When passed, it will be
-passed through to the benchmark binaries that are run, and will be accounted
-for in the tool itself; only the aggregates will be displayed, not the normal
-runs. It only affects the display; the separate runs will still be used to
-calculate the U test.
-
-### Modes of operation
-
-There are three modes of operation:
-
-1. Just compare two benchmarks
-The program is invoked like:
-
-``` bash
-$ compare.py benchmarks <benchmark_baseline> <benchmark_contender> [benchmark options]...
-```
-Where `<benchmark_baseline>` and `<benchmark_contender>` either specify a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
-
-`[benchmark options]` will be passed to the benchmark invocations. They can be anything that the binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes.
-
-Example output:
-```
-$ ./compare.py benchmarks ./a.out ./a.out
-RUNNING: ./a.out --benchmark_out=/tmp/tmprBT5nW
-Run on (8 X 4000 MHz CPU s)
-2017-11-07 21:16:44
-------------------------------------------------------
-Benchmark                Time           CPU Iterations
-------------------------------------------------------
-BM_memcpy/8             36 ns         36 ns   19101577   211.669MB/s
-BM_memcpy/64            76 ns         76 ns    9412571   800.199MB/s
-BM_memcpy/512           84 ns         84 ns    8249070   5.64771GB/s
-BM_memcpy/1024         116 ns        116 ns    6181763   8.19505GB/s
-BM_memcpy/8192         643 ns        643 ns    1062855   11.8636GB/s
-BM_copy/8              222 ns        222 ns    3137987   34.3772MB/s
-BM_copy/64            1608 ns       1608 ns     432758   37.9501MB/s
-BM_copy/512          12589 ns      12589 ns      54806   38.7867MB/s
-BM_copy/1024         25169 ns      25169 ns      27713   38.8003MB/s
-BM_copy/8192        201165 ns     201112 ns       3486   38.8466MB/s
-RUNNING: ./a.out --benchmark_out=/tmp/tmpt1wwG_
-Run on (8 X 4000 MHz CPU s)
-2017-11-07 21:16:53
-------------------------------------------------------
-Benchmark                Time           CPU Iterations
-------------------------------------------------------
-BM_memcpy/8             36 ns         36 ns   19397903   211.255MB/s
-BM_memcpy/64            73 ns         73 ns    9691174   839.635MB/s
-BM_memcpy/512           85 ns         85 ns    8312329   5.60101GB/s
-BM_memcpy/1024         118 ns        118 ns    6438774   8.11608GB/s
-BM_memcpy/8192         656 ns        656 ns    1068644   11.6277GB/s
-BM_copy/8              223 ns        223 ns    3146977   34.2338MB/s
-BM_copy/64            1611 ns       1611 ns     435340   37.8751MB/s
-BM_copy/512          12622 ns      12622 ns      54818   38.6844MB/s
-BM_copy/1024         25257 ns      25239 ns      27779   38.6927MB/s
-BM_copy/8192        205013 ns     205010 ns       3479    38.108MB/s
-Comparing ./a.out to ./a.out
-Benchmark                 Time             CPU      Time Old      Time New       CPU Old       CPU New
-------------------------------------------------------------------------------------------------------
-BM_memcpy/8            +0.0020         +0.0020            36            36            36            36
-BM_memcpy/64           -0.0468         -0.0470            76            73            76            73
-BM_memcpy/512          +0.0081         +0.0083            84            85            84            85
-BM_memcpy/1024         +0.0098         +0.0097           116           118           116           118
-BM_memcpy/8192         +0.0200         +0.0203           643           656           643           656
-BM_copy/8              +0.0046         +0.0042           222           223           222           223
-BM_copy/64             +0.0020         +0.0020          1608          1611          1608          1611
-BM_copy/512            +0.0027         +0.0026         12589         12622         12589         12622
-BM_copy/1024           +0.0035         +0.0028         25169         25257         25169         25239
-BM_copy/8192           +0.0191         +0.0194        201165        205013        201112        205010
-```
-
-What it does is, for every benchmark from the first run, it looks for the benchmark with exactly the same name in the second run, and then compares the results. If the names differ, the benchmark is omitted from the diff.
-As you can see, the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
-
-2. Compare two different filters of one benchmark
-The program is invoked like:
-
-``` bash
-$ compare.py filters <benchmark> <filter_baseline> <filter_contender> [benchmark options]...
-```
-Where `<benchmark>` either specifies a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
-
-Where `<filter_baseline>` and `<filter_contender>` are the same regex filters that you would pass to the `[--benchmark_filter=<regex>]` parameter of the benchmark binary.
-
-`[benchmark options]` will be passed to the benchmark invocations. They can be anything that the binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes.
-
-Example output:
-```
-$ ./compare.py filters ./a.out BM_memcpy BM_copy
-RUNNING: ./a.out --benchmark_filter=BM_memcpy --benchmark_out=/tmp/tmpBWKk0k
-Run on (8 X 4000 MHz CPU s)
-2017-11-07 21:37:28
-------------------------------------------------------
-Benchmark                Time           CPU Iterations
-------------------------------------------------------
-BM_memcpy/8             36 ns         36 ns   17891491   211.215MB/s
-BM_memcpy/64            74 ns         74 ns    9400999   825.646MB/s
-BM_memcpy/512           87 ns         87 ns    8027453   5.46126GB/s
-BM_memcpy/1024         111 ns        111 ns    6116853    8.5648GB/s
-BM_memcpy/8192         657 ns        656 ns    1064679   11.6247GB/s
-RUNNING: ./a.out --benchmark_filter=BM_copy --benchmark_out=/tmp/tmpAvWcOM
-Run on (8 X 4000 MHz CPU s)
-2017-11-07 21:37:33
-----------------------------------------------------
-Benchmark              Time           CPU Iterations
-----------------------------------------------------
-BM_copy/8            227 ns        227 ns    3038700   33.6264MB/s
-BM_copy/64          1640 ns       1640 ns     426893   37.2154MB/s
-BM_copy/512        12804 ns      12801 ns      55417   38.1444MB/s
-BM_copy/1024       25409 ns      25407 ns      27516   38.4365MB/s
-BM_copy/8192      202986 ns     202990 ns       3454   38.4871MB/s
-Comparing BM_memcpy to BM_copy (from ./a.out)
-Benchmark                               Time             CPU      Time Old      Time New       CPU Old       CPU New
---------------------------------------------------------------------------------------------------------------------
-[BM_memcpy vs. BM_copy]/8            +5.2829         +5.2812            36           227            36           227
-[BM_memcpy vs. BM_copy]/64          +21.1719        +21.1856            74          1640            74          1640
-[BM_memcpy vs. BM_copy]/512        +145.6487       +145.6097            87         12804            87         12801
-[BM_memcpy vs. BM_copy]/1024       +227.1860       +227.1776           111         25409           111         25407
-[BM_memcpy vs. BM_copy]/8192       +308.1664       +308.2898           657        202986           656        202990
-```
-
-As you can see, it applies the filters to the benchmarks, both when running the benchmark and before doing the diff. To make the diff work, the filter matches are replaced with a common string; thus, you can compare two different benchmark families within one benchmark binary.
-As you can see, the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
-
-3. Compare filter one from benchmark one to filter two from benchmark two
-The program is invoked like:
-
-``` bash
-$ compare.py benchmarksfiltered <benchmark_baseline> <filter_baseline> <benchmark_contender> <filter_contender> [benchmark options]...
-```
-
-Where `<benchmark_baseline>` and `<benchmark_contender>` either specify a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
-
-Where `<filter_baseline>` and `<filter_contender>` are the same regex filters that you would pass to the `[--benchmark_filter=<regex>]` parameter of the benchmark binary.
-
-`[benchmark options]` will be passed to the benchmark invocations. They can be anything that the binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes.
-
-Example output:
-```
-$ ./compare.py benchmarksfiltered ./a.out BM_memcpy ./a.out BM_copy
-RUNNING: ./a.out --benchmark_filter=BM_memcpy --benchmark_out=/tmp/tmp_FvbYg
-Run on (8 X 4000 MHz CPU s)
-2017-11-07 21:38:27
-------------------------------------------------------
-Benchmark                Time           CPU Iterations
-------------------------------------------------------
-BM_memcpy/8             37 ns         37 ns   18953482   204.118MB/s
-BM_memcpy/64            74 ns         74 ns    9206578   828.245MB/s
-BM_memcpy/512           91 ns         91 ns    8086195   5.25476GB/s
-BM_memcpy/1024         120 ns        120 ns    5804513   7.95662GB/s
-BM_memcpy/8192         664 ns        664 ns    1028363   11.4948GB/s
-RUNNING: ./a.out --benchmark_filter=BM_copy --benchmark_out=/tmp/tmpDfL5iE
-Run on (8 X 4000 MHz CPU s)
-2017-11-07 21:38:32
-----------------------------------------------------
-Benchmark              Time           CPU Iterations
-----------------------------------------------------
-BM_copy/8            230 ns        230 ns    2985909   33.1161MB/s
-BM_copy/64          1654 ns       1653 ns     419408   36.9137MB/s
-BM_copy/512        13122 ns      13120 ns      53403   37.2156MB/s
-BM_copy/1024       26679 ns      26666 ns      26575   36.6218MB/s
-BM_copy/8192      215068 ns     215053 ns       3221   36.3283MB/s
-Comparing BM_memcpy (from ./a.out) to BM_copy (from ./a.out)
-Benchmark                               Time             CPU      Time Old      Time New       CPU Old       CPU New
---------------------------------------------------------------------------------------------------------------------
-[BM_memcpy vs. BM_copy]/8            +5.1649         +5.1637            37           230            37           230
-[BM_memcpy vs. BM_copy]/64          +21.4352        +21.4374            74          1654            74          1653
-[BM_memcpy vs. BM_copy]/512        +143.6022       +143.5865            91         13122            91         13120
-[BM_memcpy vs. BM_copy]/1024       +221.5903       +221.4790           120         26679           120         26666
-[BM_memcpy vs. BM_copy]/8192       +322.9059       +323.0096           664        215068           664        215053
-```
-This is a mix of the previous two modes: two (potentially different) benchmark binaries are run, and a different filter is applied to each one.
-As you can see, the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
-
-### U test
-
-If there is a sufficient repetition count of the benchmarks, the tool can do
-a [U Test](https://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test) of the
-null hypothesis that it is equally likely that a randomly selected value from
-one sample will be less than or greater than a randomly selected value from a
-second sample.
-
-If the calculated p-value is lower than the significance level alpha, then the
-result is said to be statistically significant and the null hypothesis is
-rejected; in other words, the two benchmarks are unlikely to be identical.
-
-**WARNING**: requires a **LARGE** number of repetitions (no less than 9) to be
-meaningful!
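In practice this means the compared binaries have to emit repeated measurements. The following is a minimal sketch of such a benchmark (the function name and body are illustrative, not taken from this patch); each of the nine repetitions contributes one sample per side of the comparison when the two resulting binaries are fed to `compare.py benchmarks`:

```c++
#include <benchmark/benchmark.h>

#include <string>

static void BM_StringAssign(benchmark::State& state) {
  std::string s;
  for (auto _ : state) {
    s.assign("hello");
    benchmark::DoNotOptimize(s);  // keep the assignment from being optimized away
  }
}
// Nine repetitions is the documented minimum for a meaningful U test.
BENCHMARK(BM_StringAssign)->Repetitions(9);

BENCHMARK_MAIN();
```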
diff --git a/benchmarks/thirdparty/benchmark/include/benchmark/benchmark.h b/benchmarks/thirdparty/benchmark/include/benchmark/benchmark.h
deleted file mode 100755
index 01f12620ee..0000000000
--- a/benchmarks/thirdparty/benchmark/include/benchmark/benchmark.h
+++ /dev/null
@@ -1,1601 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Support for registering benchmarks for functions.
-
-/* Example usage:
-// Define a function that executes the code to be measured a
-// specified number of times:
-static void BM_StringCreation(benchmark::State& state) {
-  for (auto _ : state)
-    std::string empty_string;
-}
-
-// Register the function as a benchmark
-BENCHMARK(BM_StringCreation);
-
-// Define another benchmark
-static void BM_StringCopy(benchmark::State& state) {
-  std::string x = "hello";
-  for (auto _ : state)
-    std::string copy(x);
-}
-BENCHMARK(BM_StringCopy);
-
-// Augment the main() program to invoke benchmarks if specified
-// via the --benchmarks command line flag. E.g.,
-//   my_unittest --benchmark_filter=all
-//   my_unittest --benchmark_filter=BM_StringCreation
-//   my_unittest --benchmark_filter=String
-//   my_unittest --benchmark_filter='Copy|Creation'
-int main(int argc, char** argv) {
-  benchmark::Initialize(&argc, argv);
-  benchmark::RunSpecifiedBenchmarks();
-  return 0;
-}
-
-// Sometimes a family of microbenchmarks can be implemented with
-// just one routine that takes an extra argument to specify which
-// one of the family of benchmarks to run. For example, the following
-// code defines a family of microbenchmarks for measuring the speed
-// of memcpy() calls of different lengths:
-
-static void BM_memcpy(benchmark::State& state) {
-  char* src = new char[state.range(0)]; char* dst = new char[state.range(0)];
-  memset(src, 'x', state.range(0));
-  for (auto _ : state)
-    memcpy(dst, src, state.range(0));
-  state.SetBytesProcessed(state.iterations() * state.range(0));
-  delete[] src; delete[] dst;
-}
-BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
-
-// The preceding code is quite repetitive, and can be replaced with the
-// following short-hand. The following invocation will pick a few
-// appropriate arguments in the specified range and will generate a
-// microbenchmark for each such argument.
-BENCHMARK(BM_memcpy)->Range(8, 8<<10);
-
-// You might have a microbenchmark that depends on two inputs. For
-// example, the following code defines a family of microbenchmarks for
-// measuring the speed of set insertion.
-static void BM_SetInsert(benchmark::State& state) {
-  set<int> data;
-  for (auto _ : state) {
-    state.PauseTiming();
-    data = ConstructRandomSet(state.range(0));
-    state.ResumeTiming();
-    for (int j = 0; j < state.range(1); ++j)
-      data.insert(RandomNumber());
-  }
-}
-BENCHMARK(BM_SetInsert)
-    ->Args({1<<10, 128})
-    ->Args({2<<10, 128})
-    ->Args({4<<10, 128})
-    ->Args({8<<10, 128})
-    ->Args({1<<10, 512})
-    ->Args({2<<10, 512})
-    ->Args({4<<10, 512})
-    ->Args({8<<10, 512});
-
-// The preceding code is quite repetitive, and can be replaced with
-// the following short-hand. The following macro will pick a few
-// appropriate arguments in the product of the two specified ranges
-// and will generate a microbenchmark for each such pair.
-BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
-
-// For more complex patterns of inputs, passing a custom function
-// to Apply allows programmatic specification of an
-// arbitrary set of arguments to run the microbenchmark on.
-// The following example enumerates a dense range on
-// one parameter, and a sparse range on the second.
-static void CustomArguments(benchmark::internal::Benchmark* b) {
-  for (int i = 0; i <= 10; ++i)
-    for (int j = 32; j <= 1024*1024; j *= 8)
-      b->Args({i, j});
-}
-BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
-
-// Templated microbenchmarks work the same way:
-// Produce then consume 'size' messages 'iters' times
-// Measures throughput in the absence of multiprogramming.
-template <class Q> int BM_Sequential(benchmark::State& state) {
-  Q q;
-  typename Q::value_type v;
-  for (auto _ : state) {
-    for (int i = state.range(0); i--; )
-      q.push(v);
-    for (int e = state.range(0); e--; )
-      q.Wait(&v);
-  }
-  // actually messages, not bytes:
-  state.SetBytesProcessed(state.iterations() * state.range(0));
-}
-BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
-
-Use `Benchmark::MinTime(double t)` to set the minimum time used to run the
-benchmark. This option overrides the `benchmark_min_time` flag.
-
-void BM_test(benchmark::State& state) {
-  ... body ...
-}
-BENCHMARK(BM_test)->MinTime(2.0);  // Run for at least 2 seconds.
-
-In a multithreaded test, it is guaranteed that none of the threads will start
-until all have reached the loop start, and all will have finished before any
-thread exits the loop body. As such, any global setup or teardown you want to
-do can be wrapped in a check against the thread index:
-
-static void BM_MultiThreaded(benchmark::State& state) {
-  if (state.thread_index == 0) {
-    // Setup code here.
-  }
-  for (auto _ : state) {
-    // Run the test as normal.
-  }
-  if (state.thread_index == 0) {
-    // Teardown code here.
-  }
-}
-BENCHMARK(BM_MultiThreaded)->Threads(4);
-
-If a benchmark runs for a few milliseconds it may be hard to visually compare
-the measured times, since the output data is given in nanoseconds by default.
-In order to set the time unit manually, you can specify it explicitly:
-
-BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
-*/
-
-#ifndef BENCHMARK_BENCHMARK_H_
-#define BENCHMARK_BENCHMARK_H_
-
-// The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer.
-#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L)
-#define BENCHMARK_HAS_CXX11
-#endif
-
-#include <stdint.h>
-
-#include <algorithm>
-#include <cassert>
-#include <cstddef>
-#include <iosfwd>
-#include <limits>
-#include <map>
-#include <set>
-#include <string>
-#include <utility>
-#include <vector>
-
-#if defined(BENCHMARK_HAS_CXX11)
-#include <initializer_list>
-#include <type_traits>
-#include <utility>
-#endif
-
-#if defined(_MSC_VER)
-#include <intrin.h>  // for _ReadWriteBarrier
-#endif
-
-#ifndef BENCHMARK_HAS_CXX11
-#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \
-  TypeName(const TypeName&);                         \
-  TypeName& operator=(const TypeName&)
-#else
-#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \
-  TypeName(const TypeName&) = delete;                \
-  TypeName& operator=(const TypeName&) = delete
-#endif
-
-#if defined(__GNUC__)
-#define BENCHMARK_UNUSED __attribute__((unused))
-#define BENCHMARK_ALWAYS_INLINE __attribute__((always_inline))
-#define BENCHMARK_NOEXCEPT noexcept
-#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x)
-#elif defined(_MSC_VER) && !defined(__clang__)
-#define BENCHMARK_UNUSED
-#define BENCHMARK_ALWAYS_INLINE __forceinline
-#if _MSC_VER >= 1900
-#define BENCHMARK_NOEXCEPT noexcept
-#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x)
-#else
-#define BENCHMARK_NOEXCEPT
-#define BENCHMARK_NOEXCEPT_OP(x)
-#endif
-#define __func__ __FUNCTION__
-#else
-#define BENCHMARK_UNUSED
-#define BENCHMARK_ALWAYS_INLINE
-#define BENCHMARK_NOEXCEPT
-#define BENCHMARK_NOEXCEPT_OP(x)
-#endif
-
-#define BENCHMARK_INTERNAL_TOSTRING2(x) #x
-#define BENCHMARK_INTERNAL_TOSTRING(x) BENCHMARK_INTERNAL_TOSTRING2(x)
-
-#if defined(__GNUC__) || defined(__clang__)
-#define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y)
-#define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg)))
-#else
-#define BENCHMARK_BUILTIN_EXPECT(x, y) x
-#define BENCHMARK_DEPRECATED_MSG(msg)
-#define BENCHMARK_WARNING_MSG(msg)                           \
-  __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING( \
-      __LINE__) ") : warning note: " msg))
-#endif
-
-#if defined(__GNUC__) && !defined(__clang__)
-#define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
-#endif
-
-#ifndef __has_builtin
-#define __has_builtin(x) 0
-#endif
-
-#if defined(__GNUC__) || __has_builtin(__builtin_unreachable)
-#define BENCHMARK_UNREACHABLE() __builtin_unreachable()
-#elif defined(_MSC_VER)
-#define BENCHMARK_UNREACHABLE() __assume(false)
-#else
-#define BENCHMARK_UNREACHABLE() ((void)0)
-#endif
-
-namespace benchmark {
-class BenchmarkReporter;
-class MemoryManager;
-
-void Initialize(int* argc, char** argv);
-
-// Report to stdout all arguments in 'argv' as unrecognized except the first.
-// Returns true if there is at least one unrecognized argument (i.e. 'argc' > 1).
-bool ReportUnrecognizedArguments(int argc, char** argv);
-
-// Generate a list of benchmarks matching the specified --benchmark_filter flag
-// and if --benchmark_list_tests is specified return after printing the name
-// of each matching benchmark. Otherwise run each matching benchmark and
-// report the results.
-//
-// The second and third overload use the specified 'display_reporter' and
-// 'file_reporter' respectively. 'file_reporter' will write to the file
-// specified by '--benchmark_output'. If '--benchmark_output' is not given the
-// 'file_reporter' is ignored.
-//
-// RETURNS: The number of matching benchmarks.
-size_t RunSpecifiedBenchmarks();
-size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter);
-size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
-                              BenchmarkReporter* file_reporter);
-
-// Register a MemoryManager instance that will be used to collect and report
-// allocation measurements for benchmark runs.
-void RegisterMemoryManager(MemoryManager* memory_manager);
-
-namespace internal {
-class Benchmark;
-class BenchmarkImp;
-class BenchmarkFamilies;
-
-void UseCharPointer(char const volatile*);
-
-// Take ownership of the pointer and register the benchmark. Return the
-// registered benchmark.
-Benchmark* RegisterBenchmarkInternal(Benchmark*);
-
-// Ensure that the standard streams are properly initialized in every TU.
-int InitializeStreams();
-BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams();
-
-}  // namespace internal
-
-#if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \
-    defined(__EMSCRIPTEN__)
-#define BENCHMARK_HAS_NO_INLINE_ASSEMBLY
-#endif
-
-// The DoNotOptimize(...) function can be used to prevent a value or
-// expression from being optimized away by the compiler. This function is
-// intended to add little to no overhead.
-// See: https://youtu.be/nXaxk27zwlk?t=2441
-#ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY
-template <class Tp>
-inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
-  asm volatile("" : : "r,m"(value) : "memory");
-}
-
-template <class Tp>
-inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) {
-#if defined(__clang__)
-  asm volatile("" : "+r,m"(value) : : "memory");
-#else
-  asm volatile("" : "+m,r"(value) : : "memory");
-#endif
-}
-
-// Force the compiler to flush pending writes to global memory. Acts as an
-// effective read/write barrier
-inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
-  asm volatile("" : : : "memory");
-}
-#elif defined(_MSC_VER)
-template <class Tp>
-inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
-  internal::UseCharPointer(&reinterpret_cast<char const volatile&>(value));
-  _ReadWriteBarrier();
-}
-
-inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { _ReadWriteBarrier(); }
-#else
-template <class Tp>
-inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
-  internal::UseCharPointer(&reinterpret_cast<char const volatile&>(value));
-}
-// FIXME Add ClobberMemory() for non-gnu and non-msvc compilers
-#endif
-
-// This class is used for user-defined counters.
-class Counter {
- public:
-  enum Flags {
-    kDefaults = 0,
-    // Mark the counter as a rate. It will be presented divided
-    // by the duration of the benchmark.
-    kIsRate = 1U << 0U,
-    // Mark the counter as a thread-average quantity. It will be
-    // presented divided by the number of threads.
-    kAvgThreads = 1U << 1U,
-    // Mark the counter as a thread-average rate. See above.
-    kAvgThreadsRate = kIsRate | kAvgThreads,
-    // Mark the counter as a constant value, valid/same for *every* iteration.
-    // When reporting, it will be *multiplied* by the iteration count.
-    kIsIterationInvariant = 1U << 2U,
-    // Mark the counter as a constant rate.
-    // When reporting, it will be *multiplied* by the iteration count
-    // and then divided by the duration of the benchmark.
-    kIsIterationInvariantRate = kIsRate | kIsIterationInvariant,
-    // Mark the counter as an iteration-average quantity.
-    // It will be presented divided by the number of iterations.
-    kAvgIterations = 1U << 3U,
-    // Mark the counter as an iteration-average rate. See above.
-    kAvgIterationsRate = kIsRate | kAvgIterations,
-
-    // In the end, invert the result. This is always done last!
-    kInvert = 1U << 31U
-  };
-
-  enum OneK {
-    // 1'000 items per 1k
-    kIs1000 = 1000,
-    // 1'024 items per 1k
-    kIs1024 = 1024
-  };
-
-  double value;
-  Flags flags;
-  OneK oneK;
-
-  BENCHMARK_ALWAYS_INLINE
-  Counter(double v = 0., Flags f = kDefaults, OneK k = kIs1000)
-      : value(v), flags(f), oneK(k) {}
-
-  BENCHMARK_ALWAYS_INLINE operator double const&() const { return value; }
-  BENCHMARK_ALWAYS_INLINE operator double&() { return value; }
-};
-
-// A helper for user code to create unforeseen combinations of Flags, without
-// having to do this cast manually each time, or providing this operator.
-Counter::Flags inline operator|(const Counter::Flags& LHS,
-                                const Counter::Flags& RHS) {
-  return static_cast<Counter::Flags>(static_cast<int>(LHS) |
-                                     static_cast<int>(RHS));
-}
-
-// This is the container for the user-defined counters.
-typedef std::map<std::string, Counter> UserCounters;
-
-// TimeUnit is passed to a benchmark in order to specify the order of magnitude
-// for the measured time.
-enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond };
-
-// BigO is passed to a benchmark in order to specify the asymptotic
-// computational complexity for the benchmark. In case oAuto is selected,
-// complexity will be calculated automatically to the best fit.
-enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda };
-
-typedef uint64_t IterationCount;
-
-// BigOFunc is passed to a benchmark in order to specify the asymptotic
-// computational complexity for the benchmark.
-typedef double(BigOFunc)(IterationCount);
-
-// StatisticsFunc is passed to a benchmark in order to compute some descriptive
-// statistics over all the measurements of some type
-typedef double(StatisticsFunc)(const std::vector<double>&);
-
-namespace internal {
-struct Statistics {
-  std::string name_;
-  StatisticsFunc* compute_;
-
-  Statistics(const std::string& name, StatisticsFunc* compute)
-      : name_(name), compute_(compute) {}
-};
-
-struct BenchmarkInstance;
-class ThreadTimer;
-class ThreadManager;
-
-enum AggregationReportMode
-#if defined(BENCHMARK_HAS_CXX11)
-    : unsigned
-#else
-#endif
-{
-  // The mode has not been manually specified
-  ARM_Unspecified = 0,
-  // The mode is user-specified.
-  // This may or may not be set when the following bit-flags are set.
-  ARM_Default = 1U << 0U,
-  // File reporter should only output aggregates.
-  ARM_FileReportAggregatesOnly = 1U << 1U,
-  // Display reporter should only output aggregates
-  ARM_DisplayReportAggregatesOnly = 1U << 2U,
-  // Both reporters should only display aggregates.
-  ARM_ReportAggregatesOnly =
-      ARM_FileReportAggregatesOnly | ARM_DisplayReportAggregatesOnly
-};
-
-}  // namespace internal
-
-// State is passed to a running Benchmark and contains state for the
-// benchmark to use.
-class State {
- public:
-  struct StateIterator;
-  friend struct StateIterator;
-
-  // Returns iterators used to run each iteration of a benchmark using a
-  // C++11 ranged-based for loop. These functions should not be called directly.
-  //
-  // REQUIRES: The benchmark has not started running yet. Neither begin nor end
-  // have been called previously.
-  //
-  // NOTE: KeepRunning may not be used after calling either of these functions.
-  BENCHMARK_ALWAYS_INLINE StateIterator begin();
-  BENCHMARK_ALWAYS_INLINE StateIterator end();
-
-  // Returns true if the benchmark should continue through another iteration.
-  // NOTE: A benchmark may not return from the test until KeepRunning() has
-  // returned false.
-  bool KeepRunning();
-
-  // Returns true iff the benchmark should run n more iterations.
-  // REQUIRES: 'n' > 0.
-  // NOTE: A benchmark must not return from the test until KeepRunningBatch()
-  // has returned false.
-  // NOTE: KeepRunningBatch() may overshoot by up to 'n' iterations.
-  //
-  // Intended usage:
-  //   while (state.KeepRunningBatch(1000)) {
-  //     // process 1000 elements
-  //   }
-  bool KeepRunningBatch(IterationCount n);
-
-  // REQUIRES: timer is running and 'SkipWithError(...)' has not been called
-  //           by the current thread.
-  // Stop the benchmark timer. If not called, the timer will be
-  // automatically stopped after the last iteration of the benchmark loop.
-  //
-  // For threaded benchmarks the PauseTiming() function only pauses the timing
-  // for the current thread.
-  //
-  // NOTE: The "real time" measurement is per-thread. If different threads
-  // report different measurements the largest one is reported.
-  //
-  // NOTE: PauseTiming()/ResumeTiming() are relatively
-  // heavyweight, and so their use should generally be avoided
-  // within each benchmark iteration, if possible.
-  void PauseTiming();
-
-  // REQUIRES: timer is not running and 'SkipWithError(...)' has not been called
-  //           by the current thread.
-  // Start the benchmark timer. The timer is NOT running on entrance to the
-  // benchmark function. It begins running after control flow enters the
-  // benchmark loop.
-  //
-  // NOTE: PauseTiming()/ResumeTiming() are relatively
-  // heavyweight, and so their use should generally be avoided
-  // within each benchmark iteration, if possible.
-  void ResumeTiming();
-
-  // REQUIRES: 'SkipWithError(...)' has not been called previously by the
-  //           current thread.
-  // Report the benchmark as resulting in an error with the specified 'msg'.
-  // After this call the user may explicitly 'return' from the benchmark.
-  //
-  // If the ranged-for style of benchmark loop is used, the user must explicitly
-  // break from the loop, otherwise all future iterations will be run.
-  // If the 'KeepRunning()' loop is used the current thread will automatically
-  // exit the loop at the end of the current iteration.
-  //
-  // For threaded benchmarks only the current thread stops executing and future
-  // calls to `KeepRunning()` will block until all threads have completed
-  // the `KeepRunning()` loop. If multiple threads report an error only the
-  // first error message is used.
-  //
-  // NOTE: Calling 'SkipWithError(...)' does not cause the benchmark to exit
-  // the current scope immediately. If the function is called from within
-  // the 'KeepRunning()' loop the current iteration will finish. It is the
-  // user's responsibility to exit the scope as needed.
-  void SkipWithError(const char* msg);
-
-  // Returns true if an error has been reported with 'SkipWithError(...)'.
-  bool error_occurred() const { return error_occurred_; }
-
-  // REQUIRES: called exactly once per iteration of the benchmarking loop.
-  // Set the manually measured time for this benchmark iteration, which
-  // is used instead of automatically measured time if UseManualTime() was
-  // specified.
-  //
-  // For threaded benchmarks the final value will be set to the largest
-  // reported values.
-  void SetIterationTime(double seconds);
-
-  // Set the number of bytes processed by the current benchmark
-  // execution. This routine is typically called once at the end of a
-  // throughput oriented benchmark.
-  //
-  // REQUIRES: a benchmark has exited its benchmarking loop.
-  BENCHMARK_ALWAYS_INLINE
-  void SetBytesProcessed(int64_t bytes) {
-    counters["bytes_per_second"] =
-        Counter(static_cast<double>(bytes), Counter::kIsRate, Counter::kIs1024);
-  }
-
-  BENCHMARK_ALWAYS_INLINE
-  int64_t bytes_processed() const {
-    if (counters.find("bytes_per_second") != counters.end())
-      return static_cast<int64_t>(counters.at("bytes_per_second"));
-    return 0;
-  }
-
-  // If this routine is called with complexity_n > 0 and a complexity report is
-  // requested for the family benchmark, then the current benchmark will be
-  // part of the computation and complexity_n will represent the length of N.
-  BENCHMARK_ALWAYS_INLINE
-  void SetComplexityN(int64_t complexity_n) { complexity_n_ = complexity_n; }
-
-  BENCHMARK_ALWAYS_INLINE
-  int64_t complexity_length_n() const { return complexity_n_; }
-
-  // If this routine is called with items > 0, then an items/s
-  // label is printed on the benchmark report line for the currently
-  // executing benchmark. It is typically called at the end of a processing
-  // benchmark where a processing items/second output is desired.
-  //
-  // REQUIRES: a benchmark has exited its benchmarking loop.
-  BENCHMARK_ALWAYS_INLINE
-  void SetItemsProcessed(int64_t items) {
-    counters["items_per_second"] =
-        Counter(static_cast<double>(items), benchmark::Counter::kIsRate);
-  }
-
-  BENCHMARK_ALWAYS_INLINE
-  int64_t items_processed() const {
-    if (counters.find("items_per_second") != counters.end())
-      return static_cast<int64_t>(counters.at("items_per_second"));
-    return 0;
-  }
-
-  // If this routine is called, the specified label is printed at the
-  // end of the benchmark report line for the currently executing
-  // benchmark. Example:
-  //  static void BM_Compress(benchmark::State& state) {
-  //    ...
-  //    double compression = input_size / output_size;
-  //    state.SetLabel(StrFormat("compress:%.1f%%", 100.0*compression));
-  //  }
-  // Produces output that looks like:
-  //  BM_Compress   50   50   14115038   compress:27.3%
-  //
-  // REQUIRES: a benchmark has exited its benchmarking loop.
-  void SetLabel(const char* label);
-
-  void BENCHMARK_ALWAYS_INLINE SetLabel(const std::string& str) {
-    this->SetLabel(str.c_str());
-  }
-
-  // Range arguments for this run. CHECKs if the argument has been set.
-  BENCHMARK_ALWAYS_INLINE
-  int64_t range(std::size_t pos = 0) const {
-    assert(range_.size() > pos);
-    return range_[pos];
-  }
-
-  BENCHMARK_DEPRECATED_MSG("use 'range(0)' instead")
-  int64_t range_x() const { return range(0); }
-
-  BENCHMARK_DEPRECATED_MSG("use 'range(1)' instead")
-  int64_t range_y() const { return range(1); }
-
-  BENCHMARK_ALWAYS_INLINE
-  IterationCount iterations() const {
-    if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
-      return 0;
-    }
-    return max_iterations - total_iterations_ + batch_leftover_;
-  }
-
- private
-    :  // items we expect on the first cache line (ie 64 bytes of the struct)
-  // When total_iterations_ is 0, KeepRunning() and friends will return false.
-  // May be larger than max_iterations.
-  IterationCount total_iterations_;
-
-  // When using KeepRunningBatch(), batch_leftover_ holds the number of
-  // iterations beyond max_iters that were run. Used to track
-  // completed_iterations_ accurately.
-  IterationCount batch_leftover_;
-
- public:
-  const IterationCount max_iterations;
-
- private:
-  bool started_;
-  bool finished_;
-  bool error_occurred_;
-
- private:  // items we don't need on the first cache line
-  std::vector<int64_t> range_;
-
-  int64_t complexity_n_;
-
- public:
-  // Container for user-defined counters.
-  UserCounters counters;
-  // Index of the executing thread. Values from [0, threads).
-  const int thread_index;
-  // Number of threads concurrently executing the benchmark.
-  const int threads;
-
- private:
-  State(IterationCount max_iters, const std::vector<int64_t>& ranges,
-        int thread_i, int n_threads, internal::ThreadTimer* timer,
-        internal::ThreadManager* manager);
-
-  void StartKeepRunning();
-  // Implementation of KeepRunning() and KeepRunningBatch().
-  // is_batch must be true unless n is 1.
-  bool KeepRunningInternal(IterationCount n, bool is_batch);
-  void FinishKeepRunning();
-  internal::ThreadTimer* timer_;
-  internal::ThreadManager* manager_;
-
-  friend struct internal::BenchmarkInstance;
-};
-
-inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunning() {
-  return KeepRunningInternal(1, /*is_batch=*/false);
-}
-
-inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningBatch(IterationCount n) {
-  return KeepRunningInternal(n, /*is_batch=*/true);
-}
-
-inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningInternal(IterationCount n,
-                                                               bool is_batch) {
-  // total_iterations_ is set to 0 by the constructor, and always set to a
-  // nonzero value by StartKeepRunning().
-  assert(n > 0);
-  // n must be 1 unless is_batch is true.
-  assert(is_batch || n == 1);
-  if (BENCHMARK_BUILTIN_EXPECT(total_iterations_ >= n, true)) {
-    total_iterations_ -= n;
-    return true;
-  }
-  if (!started_) {
-    StartKeepRunning();
-    if (!error_occurred_ && total_iterations_ >= n) {
-      total_iterations_ -= n;
-      return true;
-    }
-  }
-  // For non-batch runs, total_iterations_ must be 0 by now.
-  if (is_batch && total_iterations_ != 0) {
-    batch_leftover_ = n - total_iterations_;
-    total_iterations_ = 0;
-    return true;
-  }
-  FinishKeepRunning();
-  return false;
-}
-
-struct State::StateIterator {
-  struct BENCHMARK_UNUSED Value {};
-  typedef std::forward_iterator_tag iterator_category;
-  typedef Value value_type;
-  typedef Value reference;
-  typedef Value pointer;
-  typedef std::ptrdiff_t difference_type;
-
- private:
-  friend class State;
-  BENCHMARK_ALWAYS_INLINE
-  StateIterator() : cached_(0), parent_() {}
-
-  BENCHMARK_ALWAYS_INLINE
-  explicit StateIterator(State* st)
-      : cached_(st->error_occurred_ ? 0 : st->max_iterations), parent_(st) {}
-
- public:
-  BENCHMARK_ALWAYS_INLINE
-  Value operator*() const { return Value(); }
-
-  BENCHMARK_ALWAYS_INLINE
-  StateIterator& operator++() {
-    assert(cached_ > 0);
-    --cached_;
-    return *this;
-  }
-
-  BENCHMARK_ALWAYS_INLINE
-  bool operator!=(StateIterator const&) const {
-    if (BENCHMARK_BUILTIN_EXPECT(cached_ != 0, true)) return true;
-    parent_->FinishKeepRunning();
-    return false;
-  }
-
- private:
-  IterationCount cached_;
-  State* const parent_;
-};
-
-inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::begin() {
-  return StateIterator(this);
-}
-inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::end() {
-  StartKeepRunning();
-  return StateIterator();
-}
-
-namespace internal {
-
-typedef void(Function)(State&);
-
-// ------------------------------------------------------
-// Benchmark registration object. The BENCHMARK() macro expands
-// into an internal::Benchmark* object. Various methods can
-// be called on this object to change the properties of the benchmark.
-// Each method returns "this" so that multiple method calls can be
-// chained into one expression.
-class Benchmark {
- public:
-  virtual ~Benchmark();
-
-  // Note: the following methods all return "this" so that multiple
-  // method calls can be chained together in one expression.
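-  //
-  // For example, a benchmark can be configured in a single chained
-  // expression (an illustrative sketch using only the methods declared
-  // below):
-  //
-  //   BENCHMARK(BM_memcpy)->RangeMultiplier(4)->Range(8, 8<<10)
-  //       ->Unit(benchmark::kMicrosecond)->Repetitions(3);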
-
-  // Run this benchmark once with "x" as the extra argument passed
-  // to the function.
-  // REQUIRES: The function passed to the constructor must accept an arg1.
-  Benchmark* Arg(int64_t x);
-
-  // Run this benchmark with the given time unit for the generated output report
-  Benchmark* Unit(TimeUnit unit);
-
-  // Run this benchmark once for a number of values picked from the
-  // range [start..limit]. (start and limit are always picked.)
-  // REQUIRES: The function passed to the constructor must accept an arg1.
-  Benchmark* Range(int64_t start, int64_t limit);
-
-  // Run this benchmark once for all values in the range [start..limit] with
-  // specific step
-  // REQUIRES: The function passed to the constructor must accept an arg1.
-  Benchmark* DenseRange(int64_t start, int64_t limit, int step = 1);
-
-  // Run this benchmark once with "args" as the extra arguments passed
-  // to the function.
-  // REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
-  Benchmark* Args(const std::vector<int64_t>& args);
-
-  // Equivalent to Args({x, y})
-  // NOTE: This is a legacy C++03 interface provided for compatibility only.
-  //   New code should use 'Args'.
-  Benchmark* ArgPair(int64_t x, int64_t y) {
-    std::vector<int64_t> args;
-    args.push_back(x);
-    args.push_back(y);
-    return Args(args);
-  }
-
-  // Run this benchmark once for a number of values picked from the
-  // ranges [start..limit]. (starts and limits are always picked.)
-  // REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
-  Benchmark* Ranges(const std::vector<std::pair<int64_t, int64_t> >& ranges);
-
-  // Run this benchmark once for each combination of values in the (cartesian)
-  // product of the supplied argument lists.
-  // REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
-  Benchmark* ArgsProduct(const std::vector<std::vector<int64_t> >& arglists);
-
-  // Equivalent to ArgNames({name})
-  Benchmark* ArgName(const std::string& name);
-
-  // Set the argument names to display in the benchmark name. If not called,
-  // only argument values will be shown.
-  Benchmark* ArgNames(const std::vector<std::string>& names);
-
-  // Equivalent to Ranges({{lo1, hi1}, {lo2, hi2}}).
-  // NOTE: This is a legacy C++03 interface provided for compatibility only.
-  //   New code should use 'Ranges'.
-  Benchmark* RangePair(int64_t lo1, int64_t hi1, int64_t lo2, int64_t hi2) {
-    std::vector<std::pair<int64_t, int64_t> > ranges;
-    ranges.push_back(std::make_pair(lo1, hi1));
-    ranges.push_back(std::make_pair(lo2, hi2));
-    return Ranges(ranges);
-  }
-
-  // Pass this benchmark object to *func, which can customize
-  // the benchmark by calling various methods like Arg, Args,
-  // Threads, etc.
-  Benchmark* Apply(void (*func)(Benchmark* benchmark));
-
-  // Set the range multiplier for non-dense range. If not called, the range
-  // multiplier kRangeMultiplier will be used.
-  Benchmark* RangeMultiplier(int multiplier);
-
-  // Set the minimum amount of time to use when running this benchmark. This
-  // option overrides the `benchmark_min_time` flag.
-  // REQUIRES: `t > 0` and `Iterations` has not been called on this benchmark.
-  Benchmark* MinTime(double t);
-
-  // Specify the number of iterations that should be run by this benchmark.
-  // REQUIRES: 'n > 0' and `MinTime` has not been called on this benchmark.
-  //
-  // NOTE: This function should only be used when *exact* iteration control is
-  //   needed and never to control or limit how long a benchmark runs, where
-  //   `--benchmark_min_time=N` or `MinTime(...)` should be used instead.
-  Benchmark* Iterations(IterationCount n);
-
-  // Specify the number of times to repeat this benchmark. This option
-  // overrides the `benchmark_repetitions` flag.
-  // REQUIRES: `n > 0`
-  Benchmark* Repetitions(int n);
-
-  // Specify if each repetition of the benchmark should be reported separately
-  // or if only the final statistics should be reported. If the benchmark
-  // is not repeated then the single result is always reported.
-  // Applies to *ALL* reporters (display and file).
-  Benchmark* ReportAggregatesOnly(bool value = true);
-
-  // Same as ReportAggregatesOnly(), but applies to display reporter only.
-  Benchmark* DisplayAggregatesOnly(bool value = true);
-
-  // By default, the CPU time is measured only for the main thread, which may
-  // be unrepresentative if the benchmark uses threads internally. If called,
-  // the total CPU time spent by all the threads will be measured instead.
-  // If not called, only the main thread's CPU time will be measured.
-  Benchmark* MeasureProcessCPUTime();
-
-  // If a particular benchmark should use the wall clock instead of the CPU time
-  // (be it either the CPU time of the main thread only (default), or the
-  // total CPU usage of the benchmark), call this method. If called, the elapsed
-  // (wall) time will be used to control how many iterations are run, and in the
-  // printing of items/second or MB/second values.
-  // If not called, the CPU time used by the benchmark will be used.
-  Benchmark* UseRealTime();
-
-  // If a benchmark must measure time manually (e.g. if GPU execution time is
-  // being measured), call this method. If called, each benchmark iteration
-  // should call SetIterationTime(seconds) to report the measured time, which
-  // will be used to control how many iterations are run, and in the printing
-  // of items/second or MB/second values.
-  Benchmark* UseManualTime();
-
-  // Set the asymptotic computational complexity for the benchmark. If called
-  // the asymptotic computational complexity will be shown on the output.
-  Benchmark* Complexity(BigO complexity = benchmark::oAuto);
-
-  // Set the asymptotic computational complexity for the benchmark from a
-  // user-supplied function. If called, the asymptotic computational
-  // complexity will be shown on the output.
-  Benchmark* Complexity(BigOFunc* complexity);
-
-  // Add this statistic to be computed over all the values of the benchmark run
-  Benchmark* ComputeStatistics(std::string name, StatisticsFunc* statistics);
-
-  // Support for running multiple copies of the same benchmark concurrently
-  // in multiple threads. This may be useful when measuring the scaling
-  // of some piece of code.
-
-  // Run one instance of this benchmark concurrently in t threads.
-  Benchmark* Threads(int t);
-
-  // Pick a set of values T from [min_threads,max_threads].
-  // min_threads and max_threads are always included in T. Run this
-  // benchmark once for each value in T. The benchmark run for a
-  // particular value t consists of t threads running the benchmark
-  // function concurrently. For example, consider:
-  //    BENCHMARK(Foo)->ThreadRange(1,16);
-  // This will run the following benchmarks:
-  //    Foo in 1 thread
-  //    Foo in 2 threads
-  //    Foo in 4 threads
-  //    Foo in 8 threads
-  //    Foo in 16 threads
-  Benchmark* ThreadRange(int min_threads, int max_threads);
-
-  // For each value n in the range, run this benchmark once using n threads.
-  // min_threads and max_threads are always included in the range.
-  // stride specifies the increment. E.g.
DenseThreadRange(1, 8, 3) starts - // a benchmark with 1, 4, 7 and 8 threads. - Benchmark* DenseThreadRange(int min_threads, int max_threads, int stride = 1); - - // Equivalent to ThreadRange(NumCPUs(), NumCPUs()) - Benchmark* ThreadPerCpu(); - - virtual void Run(State& state) = 0; - - protected: - explicit Benchmark(const char* name); - Benchmark(Benchmark const&); - void SetName(const char* name); - - int ArgsCnt() const; - - private: - friend class BenchmarkFamilies; - - std::string name_; - AggregationReportMode aggregation_report_mode_; - std::vector arg_names_; // Args for all benchmark runs - std::vector > args_; // Args for all benchmark runs - TimeUnit time_unit_; - int range_multiplier_; - double min_time_; - IterationCount iterations_; - int repetitions_; - bool measure_process_cpu_time_; - bool use_real_time_; - bool use_manual_time_; - BigO complexity_; - BigOFunc* complexity_lambda_; - std::vector statistics_; - std::vector thread_counts_; - - Benchmark& operator=(Benchmark const&); -}; - -} // namespace internal - -// Create and register a benchmark with the specified 'name' that invokes -// the specified functor 'fn'. -// -// RETURNS: A pointer to the registered benchmark. -internal::Benchmark* RegisterBenchmark(const char* name, - internal::Function* fn); - -#if defined(BENCHMARK_HAS_CXX11) -template -internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn); -#endif - -// Remove all registered benchmarks. All pointers to previously registered -// benchmarks are invalidated. -void ClearRegisteredBenchmarks(); - -namespace internal { -// The class used to hold all Benchmarks created from static function. -// (ie those created using the BENCHMARK(...) macros. -class FunctionBenchmark : public Benchmark { - public: - FunctionBenchmark(const char* name, Function* func) - : Benchmark(name), func_(func) {} - - virtual void Run(State& st); - - private: - Function* func_; -}; - -#ifdef BENCHMARK_HAS_CXX11 -template -class LambdaBenchmark : public Benchmark { - public: - virtual void Run(State& st) { lambda_(st); } - - private: - template - LambdaBenchmark(const char* name, OLambda&& lam) - : Benchmark(name), lambda_(std::forward(lam)) {} - - LambdaBenchmark(LambdaBenchmark const&) = delete; - - private: - template - friend Benchmark* ::benchmark::RegisterBenchmark(const char*, Lam&&); - - Lambda lambda_; -}; -#endif - -} // namespace internal - -inline internal::Benchmark* RegisterBenchmark(const char* name, - internal::Function* fn) { - return internal::RegisterBenchmarkInternal( - ::new internal::FunctionBenchmark(name, fn)); -} - -#ifdef BENCHMARK_HAS_CXX11 -template -internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn) { - using BenchType = - internal::LambdaBenchmark::type>; - return internal::RegisterBenchmarkInternal( - ::new BenchType(name, std::forward(fn))); -} -#endif - -#if defined(BENCHMARK_HAS_CXX11) && \ - (!defined(BENCHMARK_GCC_VERSION) || BENCHMARK_GCC_VERSION >= 409) -template -internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn, - Args&&... args) { - return benchmark::RegisterBenchmark( - name, [=](benchmark::State& st) { fn(st, args...); }); -} -#else -#define BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK -#endif - -// The base class for all fixture tests. -class Fixture : public internal::Benchmark { - public: - Fixture() : internal::Benchmark("") {} - - virtual void Run(State& st) { - this->SetUp(st); - this->BenchmarkCase(st); - this->TearDown(st); - } - - // These will be deprecated ... 
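Editor's note: the RegisterBenchmark entry points above also work without any macros, which is useful when benchmark names or arguments are only known at runtime. A hedged sketch, with all names invented:

#include <benchmark/benchmark.h>
#include <string>

static void BM_string_append(benchmark::State& state, std::string token) {
  for (auto _ : state) {
    std::string s;
    for (int i = 0; i < 64; ++i) s += token;
    benchmark::DoNotOptimize(s.data());
  }
}

int main(int argc, char** argv) {
  // Lambda form (C++11 path above).
  benchmark::RegisterBenchmark(
      "string_append/short",
      [](benchmark::State& st) { BM_string_append(st, "ab"); });
  // Variadic form (guarded above for C++11 and GCC >= 4.9): the extra
  // arguments are captured and forwarded into the call.
  benchmark::RegisterBenchmark("string_append/long", BM_string_append,
                               std::string(32, 'x'));
  benchmark::Initialize(&argc, argv);
  benchmark::RunSpecifiedBenchmarks();
}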
- virtual void SetUp(const State&) {} - virtual void TearDown(const State&) {} - // ... In favor of these. - virtual void SetUp(State& st) { SetUp(const_cast(st)); } - virtual void TearDown(State& st) { TearDown(const_cast(st)); } - - protected: - virtual void BenchmarkCase(State&) = 0; -}; - -} // namespace benchmark - -// ------------------------------------------------------ -// Macro to register benchmarks - -// Check that __COUNTER__ is defined and that __COUNTER__ increases by 1 -// every time it is expanded. X + 1 == X + 0 is used in case X is defined to be -// empty. If X is empty the expression becomes (+1 == +0). -#if defined(__COUNTER__) && (__COUNTER__ + 1 == __COUNTER__ + 0) -#define BENCHMARK_PRIVATE_UNIQUE_ID __COUNTER__ -#else -#define BENCHMARK_PRIVATE_UNIQUE_ID __LINE__ -#endif - -// Helpers for generating unique variable names -#define BENCHMARK_PRIVATE_NAME(n) \ - BENCHMARK_PRIVATE_CONCAT(_benchmark_, BENCHMARK_PRIVATE_UNIQUE_ID, n) -#define BENCHMARK_PRIVATE_CONCAT(a, b, c) BENCHMARK_PRIVATE_CONCAT2(a, b, c) -#define BENCHMARK_PRIVATE_CONCAT2(a, b, c) a##b##c - -#define BENCHMARK_PRIVATE_DECLARE(n) \ - static ::benchmark::internal::Benchmark* BENCHMARK_PRIVATE_NAME(n) \ - BENCHMARK_UNUSED - -#define BENCHMARK(n) \ - BENCHMARK_PRIVATE_DECLARE(n) = \ - (::benchmark::internal::RegisterBenchmarkInternal( \ - new ::benchmark::internal::FunctionBenchmark(#n, n))) - -// Old-style macros -#define BENCHMARK_WITH_ARG(n, a) BENCHMARK(n)->Arg((a)) -#define BENCHMARK_WITH_ARG2(n, a1, a2) BENCHMARK(n)->Args({(a1), (a2)}) -#define BENCHMARK_WITH_UNIT(n, t) BENCHMARK(n)->Unit((t)) -#define BENCHMARK_RANGE(n, lo, hi) BENCHMARK(n)->Range((lo), (hi)) -#define BENCHMARK_RANGE2(n, l1, h1, l2, h2) \ - BENCHMARK(n)->RangePair({{(l1), (h1)}, {(l2), (h2)}}) - -#ifdef BENCHMARK_HAS_CXX11 - -// Register a benchmark which invokes the function specified by `func` -// with the additional arguments specified by `...`. -// -// For example: -// -// template ` -// void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) { -// [...] -//} -// /* Registers a benchmark named "BM_takes_args/int_string_test` */ -// BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc")); -#define BENCHMARK_CAPTURE(func, test_case_name, ...) \ - BENCHMARK_PRIVATE_DECLARE(func) = \ - (::benchmark::internal::RegisterBenchmarkInternal( \ - new ::benchmark::internal::FunctionBenchmark( \ - #func "/" #test_case_name, \ - [](::benchmark::State& st) { func(st, __VA_ARGS__); }))) - -#endif // BENCHMARK_HAS_CXX11 - -// This will register a benchmark for a templatized function. For example: -// -// template -// void BM_Foo(int iters); -// -// BENCHMARK_TEMPLATE(BM_Foo, 1); -// -// will register BM_Foo<1> as a benchmark. -#define BENCHMARK_TEMPLATE1(n, a) \ - BENCHMARK_PRIVATE_DECLARE(n) = \ - (::benchmark::internal::RegisterBenchmarkInternal( \ - new ::benchmark::internal::FunctionBenchmark(#n "<" #a ">", n))) - -#define BENCHMARK_TEMPLATE2(n, a, b) \ - BENCHMARK_PRIVATE_DECLARE(n) = \ - (::benchmark::internal::RegisterBenchmarkInternal( \ - new ::benchmark::internal::FunctionBenchmark(#n "<" #a "," #b ">", \ - n))) - -#ifdef BENCHMARK_HAS_CXX11 -#define BENCHMARK_TEMPLATE(n, ...) 
\ - BENCHMARK_PRIVATE_DECLARE(n) = \ - (::benchmark::internal::RegisterBenchmarkInternal( \ - new ::benchmark::internal::FunctionBenchmark( \ - #n "<" #__VA_ARGS__ ">", n<__VA_ARGS__>))) -#else -#define BENCHMARK_TEMPLATE(n, a) BENCHMARK_TEMPLATE1(n, a) -#endif - -#define BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ - class BaseClass##_##Method##_Benchmark : public BaseClass { \ - public: \ - BaseClass##_##Method##_Benchmark() : BaseClass() { \ - this->SetName(#BaseClass "/" #Method); \ - } \ - \ - protected: \ - virtual void BenchmarkCase(::benchmark::State&); \ - }; - -#define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ - class BaseClass##_##Method##_Benchmark : public BaseClass { \ - public: \ - BaseClass##_##Method##_Benchmark() : BaseClass() { \ - this->SetName(#BaseClass "<" #a ">/" #Method); \ - } \ - \ - protected: \ - virtual void BenchmarkCase(::benchmark::State&); \ - }; - -#define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ - class BaseClass##_##Method##_Benchmark : public BaseClass { \ - public: \ - BaseClass##_##Method##_Benchmark() : BaseClass() { \ - this->SetName(#BaseClass "<" #a "," #b ">/" #Method); \ - } \ - \ - protected: \ - virtual void BenchmarkCase(::benchmark::State&); \ - }; - -#ifdef BENCHMARK_HAS_CXX11 -#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, ...) \ - class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__> { \ - public: \ - BaseClass##_##Method##_Benchmark() : BaseClass<__VA_ARGS__>() { \ - this->SetName(#BaseClass "<" #__VA_ARGS__ ">/" #Method); \ - } \ - \ - protected: \ - virtual void BenchmarkCase(::benchmark::State&); \ - }; -#else -#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) \ - BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a) -#endif - -#define BENCHMARK_DEFINE_F(BaseClass, Method) \ - BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase - -#define BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) \ - BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase - -#define BENCHMARK_TEMPLATE2_DEFINE_F(BaseClass, Method, a, b) \ - BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase - -#ifdef BENCHMARK_HAS_CXX11 -#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, ...) \ - BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase -#else -#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) \ - BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) -#endif - -#define BENCHMARK_REGISTER_F(BaseClass, Method) \ - BENCHMARK_PRIVATE_REGISTER_F(BaseClass##_##Method##_Benchmark) - -#define BENCHMARK_PRIVATE_REGISTER_F(TestName) \ - BENCHMARK_PRIVATE_DECLARE(TestName) = \ - (::benchmark::internal::RegisterBenchmarkInternal(new TestName())) - -// This macro will define and register a benchmark within a fixture class. 
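Editor's note: a compact usage sketch for the macro families deleted above, including the fixture macros (BENCHMARK_F itself follows just below); every benchmark name here is invented.

#include <benchmark/benchmark.h>
#include <set>
#include <string>
#include <vector>

// BENCHMARK_TEMPLATE registers an instantiation of a templated function.
template <class Set>
static void BM_set_insert(benchmark::State& state) {
  for (auto _ : state) {
    Set s;
    for (int i = 0; i < 256; ++i) s.insert(i);
    benchmark::DoNotOptimize(&s);
  }
}
BENCHMARK_TEMPLATE(BM_set_insert, std::set<int>);

// BENCHMARK_CAPTURE forwards extra arguments into the benchmark function.
static void BM_takes_args(benchmark::State& state, int x, std::string s) {
  for (auto _ : state) benchmark::DoNotOptimize(s.size() + static_cast<size_t>(x));
}
BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));

// Fixture flow: DEFINE_F supplies the body, REGISTER_F chains configuration.
class VecFixture : public benchmark::Fixture {
 public:
  void SetUp(benchmark::State&) override { data.assign(1024, 7); }
  void TearDown(benchmark::State&) override { data.clear(); }
  std::vector<int> data;
};

BENCHMARK_DEFINE_F(VecFixture, Sum)(benchmark::State& state) {
  for (auto _ : state) {
    long sum = 0;
    for (int v : data) sum += v;
    benchmark::DoNotOptimize(sum);
  }
}
BENCHMARK_REGISTER_F(VecFixture, Sum)->Threads(2);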
-#define BENCHMARK_F(BaseClass, Method) \ - BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ - BENCHMARK_REGISTER_F(BaseClass, Method); \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase - -#define BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) \ - BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ - BENCHMARK_REGISTER_F(BaseClass, Method); \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase - -#define BENCHMARK_TEMPLATE2_F(BaseClass, Method, a, b) \ - BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ - BENCHMARK_REGISTER_F(BaseClass, Method); \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase - -#ifdef BENCHMARK_HAS_CXX11 -#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \ - BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \ - BENCHMARK_REGISTER_F(BaseClass, Method); \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase -#else -#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) \ - BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) -#endif - -// Helper macro to create a main routine in a test that runs the benchmarks -#define BENCHMARK_MAIN() \ - int main(int argc, char** argv) { \ - ::benchmark::Initialize(&argc, argv); \ - if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1; \ - ::benchmark::RunSpecifiedBenchmarks(); \ - } \ - int main(int, char**) - -// ------------------------------------------------------ -// Benchmark Reporters - -namespace benchmark { - -struct CPUInfo { - struct CacheInfo { - std::string type; - int level; - int size; - int num_sharing; - }; - - enum Scaling { - UNKNOWN, - ENABLED, - DISABLED - }; - - int num_cpus; - double cycles_per_second; - std::vector caches; - Scaling scaling; - std::vector load_avg; - - static const CPUInfo& Get(); - - private: - CPUInfo(); - BENCHMARK_DISALLOW_COPY_AND_ASSIGN(CPUInfo); -}; - -// Adding Struct for System Information -struct SystemInfo { - std::string name; - static const SystemInfo& Get(); - - private: - SystemInfo(); - BENCHMARK_DISALLOW_COPY_AND_ASSIGN(SystemInfo); -}; - -// BenchmarkName contains the components of the Benchmark's name -// which allows individual fields to be modified or cleared before -// building the final name using 'str()'. -struct BenchmarkName { - std::string function_name; - std::string args; - std::string min_time; - std::string iterations; - std::string repetitions; - std::string time_type; - std::string threads; - - // Return the full name of the benchmark with each non-empty - // field separated by a '/' - std::string str() const; -}; - -// Interface for custom benchmark result printers. -// By default, benchmark reports are printed to stdout. However an application -// can control the destination of the reports by calling -// RunSpecifiedBenchmarks and passing it a custom reporter object. -// The reporter object must implement the following interface. -class BenchmarkReporter { - public: - struct Context { - CPUInfo const& cpu_info; - SystemInfo const& sys_info; - // The number of chars in the longest benchmark name. 
- size_t name_field_width; - static const char* executable_name; - Context(); - }; - - struct Run { - static const int64_t no_repetition_index = -1; - enum RunType { RT_Iteration, RT_Aggregate }; - - Run() - : run_type(RT_Iteration), - error_occurred(false), - iterations(1), - threads(1), - time_unit(kNanosecond), - real_accumulated_time(0), - cpu_accumulated_time(0), - max_heapbytes_used(0), - complexity(oNone), - complexity_lambda(), - complexity_n(0), - report_big_o(false), - report_rms(false), - counters(), - has_memory_result(false), - allocs_per_iter(0.0), - max_bytes_used(0) {} - - std::string benchmark_name() const; - BenchmarkName run_name; - RunType run_type; - std::string aggregate_name; - std::string report_label; // Empty if not set by benchmark. - bool error_occurred; - std::string error_message; - - IterationCount iterations; - int64_t threads; - int64_t repetition_index; - int64_t repetitions; - TimeUnit time_unit; - double real_accumulated_time; - double cpu_accumulated_time; - - // Return a value representing the real time per iteration in the unit - // specified by 'time_unit'. - // NOTE: If 'iterations' is zero the returned value represents the - // accumulated time. - double GetAdjustedRealTime() const; - - // Return a value representing the cpu time per iteration in the unit - // specified by 'time_unit'. - // NOTE: If 'iterations' is zero the returned value represents the - // accumulated time. - double GetAdjustedCPUTime() const; - - // This is set to 0.0 if memory tracing is not enabled. - double max_heapbytes_used; - - // Keep track of arguments to compute asymptotic complexity - BigO complexity; - BigOFunc* complexity_lambda; - int64_t complexity_n; - - // what statistics to compute from the measurements - const std::vector* statistics; - - // Inform print function whether the current run is a complexity report - bool report_big_o; - bool report_rms; - - UserCounters counters; - - // Memory metrics. - bool has_memory_result; - double allocs_per_iter; - int64_t max_bytes_used; - }; - - // Construct a BenchmarkReporter with the output stream set to 'std::cout' - // and the error stream set to 'std::cerr' - BenchmarkReporter(); - - // Called once for every suite of benchmarks run. - // The parameter "context" contains information that the - // reporter may wish to use when generating its report, for example the - // platform under which the benchmarks are running. The benchmark run is - // never started if this function returns false, allowing the reporter - // to skip runs based on the context information. - virtual bool ReportContext(const Context& context) = 0; - - // Called once for each group of benchmark runs, gives information about - // cpu-time and heap memory usage during the benchmark run. If the group - // of runs contained more than two entries then 'report' contains additional - // elements representing the mean and standard deviation of those runs. - // Additionally if this group of runs was the last in a family of benchmarks - // 'reports' contains additional entries representing the asymptotic - // complexity and RMS of that benchmark family. - virtual void ReportRuns(const std::vector& report) = 0; - - // Called once and only once after ever group of benchmarks is run and - // reported. - virtual void Finalize() {} - - // REQUIRES: The object referenced by 'out' is valid for the lifetime - // of the reporter. 
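Editor's note: a minimal sketch of the reporter interface deleted above, wired in through RunSpecifiedBenchmarks; it only demonstrates the override points and is not a production reporter.

#include <benchmark/benchmark.h>
#include <vector>

// Prints one line per run using the Context/Run structures shown above.
class OneLineReporter : public benchmark::BenchmarkReporter {
 public:
  bool ReportContext(const Context& context) override {
    GetOutputStream() << "cpus=" << context.cpu_info.num_cpus << "\n";
    return true;  // returning false would skip the whole benchmark run
  }
  void ReportRuns(const std::vector<Run>& reports) override {
    for (const Run& run : reports) {
      GetOutputStream() << run.benchmark_name() << " "
                        << run.GetAdjustedRealTime() << " "
                        << benchmark::GetTimeUnitString(run.time_unit) << "\n";
    }
  }
};

int main(int argc, char** argv) {
  benchmark::Initialize(&argc, argv);
  OneLineReporter rep;
  benchmark::RunSpecifiedBenchmarks(&rep);
}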
- void SetOutputStream(std::ostream* out) { - assert(out); - output_stream_ = out; - } - - // REQUIRES: The object referenced by 'err' is valid for the lifetime - // of the reporter. - void SetErrorStream(std::ostream* err) { - assert(err); - error_stream_ = err; - } - - std::ostream& GetOutputStream() const { return *output_stream_; } - - std::ostream& GetErrorStream() const { return *error_stream_; } - - virtual ~BenchmarkReporter(); - - // Write a human readable string to 'out' representing the specified - // 'context'. - // REQUIRES: 'out' is non-null. - static void PrintBasicContext(std::ostream* out, Context const& context); - - private: - std::ostream* output_stream_; - std::ostream* error_stream_; -}; - -// Simple reporter that outputs benchmark data to the console. This is the -// default reporter used by RunSpecifiedBenchmarks(). -class ConsoleReporter : public BenchmarkReporter { - public: - enum OutputOptions { - OO_None = 0, - OO_Color = 1, - OO_Tabular = 2, - OO_ColorTabular = OO_Color | OO_Tabular, - OO_Defaults = OO_ColorTabular - }; - explicit ConsoleReporter(OutputOptions opts_ = OO_Defaults) - : output_options_(opts_), - name_field_width_(0), - prev_counters_(), - printed_header_(false) {} - - virtual bool ReportContext(const Context& context); - virtual void ReportRuns(const std::vector& reports); - - protected: - virtual void PrintRunData(const Run& report); - virtual void PrintHeader(const Run& report); - - OutputOptions output_options_; - size_t name_field_width_; - UserCounters prev_counters_; - bool printed_header_; -}; - -class JSONReporter : public BenchmarkReporter { - public: - JSONReporter() : first_report_(true) {} - virtual bool ReportContext(const Context& context); - virtual void ReportRuns(const std::vector& reports); - virtual void Finalize(); - - private: - void PrintRunData(const Run& report); - - bool first_report_; -}; - -class BENCHMARK_DEPRECATED_MSG( - "The CSV Reporter will be removed in a future release") CSVReporter - : public BenchmarkReporter { - public: - CSVReporter() : printed_header_(false) {} - virtual bool ReportContext(const Context& context); - virtual void ReportRuns(const std::vector& reports); - - private: - void PrintRunData(const Run& report); - - bool printed_header_; - std::set user_counter_names_; -}; - -// If a MemoryManager is registered, it can be used to collect and report -// allocation metrics for a run of the benchmark. -class MemoryManager { - public: - struct Result { - Result() : num_allocs(0), max_bytes_used(0) {} - - // The number of allocations made in total between Start and Stop. - int64_t num_allocs; - - // The peak memory use between Start and Stop. - int64_t max_bytes_used; - }; - - virtual ~MemoryManager() {} - - // Implement this to start recording allocation information. - virtual void Start() = 0; - - // Implement this to stop recording and fill out the given Result structure. 
- virtual void Stop(Result* result) = 0; -}; - -inline const char* GetTimeUnitString(TimeUnit unit) { - switch (unit) { - case kMillisecond: - return "ms"; - case kMicrosecond: - return "us"; - case kNanosecond: - return "ns"; - } - BENCHMARK_UNREACHABLE(); -} - -inline double GetTimeUnitMultiplier(TimeUnit unit) { - switch (unit) { - case kMillisecond: - return 1e3; - case kMicrosecond: - return 1e6; - case kNanosecond: - return 1e9; - } - BENCHMARK_UNREACHABLE(); -} - -} // namespace benchmark - -#endif // BENCHMARK_BENCHMARK_H_ diff --git a/benchmarks/thirdparty/benchmark/setup.py b/benchmarks/thirdparty/benchmark/setup.py deleted file mode 100755 index 5cdab10cf7..0000000000 --- a/benchmarks/thirdparty/benchmark/setup.py +++ /dev/null @@ -1,140 +0,0 @@ -import os -import posixpath -import re -import shutil -import sys - -from distutils import sysconfig -import setuptools -from setuptools.command import build_ext - - -HERE = os.path.dirname(os.path.abspath(__file__)) - - -IS_WINDOWS = sys.platform.startswith("win") - - -def _get_version(): - """Parse the version string from __init__.py.""" - with open( - os.path.join(HERE, "bindings", "python", "google_benchmark", "__init__.py") - ) as init_file: - try: - version_line = next( - line for line in init_file if line.startswith("__version__") - ) - except StopIteration: - raise ValueError("__version__ not defined in __init__.py") - else: - namespace = {} - exec(version_line, namespace) # pylint: disable=exec-used - return namespace["__version__"] - - -def _parse_requirements(path): - with open(os.path.join(HERE, path)) as requirements: - return [ - line.rstrip() - for line in requirements - if not (line.isspace() or line.startswith("#")) - ] - - -class BazelExtension(setuptools.Extension): - """A C/C++ extension that is defined as a Bazel BUILD target.""" - - def __init__(self, name, bazel_target): - self.bazel_target = bazel_target - self.relpath, self.target_name = posixpath.relpath(bazel_target, "//").split( - ":" - ) - setuptools.Extension.__init__(self, name, sources=[]) - - -class BuildBazelExtension(build_ext.build_ext): - """A command that runs Bazel to build a C/C++ extension.""" - - def run(self): - for ext in self.extensions: - self.bazel_build(ext) - build_ext.build_ext.run(self) - - def bazel_build(self, ext): - """Runs the bazel build to create the package.""" - with open("WORKSPACE", "r") as workspace: - workspace_contents = workspace.read() - - with open("WORKSPACE", "w") as workspace: - workspace.write( - re.sub( - r'(?<=path = ").*(?=", # May be overwritten by setup\.py\.)', - sysconfig.get_python_inc().replace(os.path.sep, posixpath.sep), - workspace_contents, - ) - ) - - if not os.path.exists(self.build_temp): - os.makedirs(self.build_temp) - - bazel_argv = [ - "bazel", - "build", - ext.bazel_target, - "--symlink_prefix=" + os.path.join(self.build_temp, "bazel-"), - "--compilation_mode=" + ("dbg" if self.debug else "opt"), - ] - - if IS_WINDOWS: - # Link with python*.lib. 
- for library_dir in self.library_dirs: - bazel_argv.append("--linkopt=/LIBPATH:" + library_dir) - - self.spawn(bazel_argv) - - shared_lib_suffix = '.dll' if IS_WINDOWS else '.so' - ext_bazel_bin_path = os.path.join( - self.build_temp, 'bazel-bin', - ext.relpath, ext.target_name + shared_lib_suffix) - - ext_dest_path = self.get_ext_fullpath(ext.name) - ext_dest_dir = os.path.dirname(ext_dest_path) - if not os.path.exists(ext_dest_dir): - os.makedirs(ext_dest_dir) - shutil.copyfile(ext_bazel_bin_path, ext_dest_path) - - -setuptools.setup( - name="google_benchmark", - version=_get_version(), - url="https://github.com/google/benchmark", - description="A library to benchmark code snippets.", - author="Google", - author_email="benchmark-py@google.com", - # Contained modules and scripts. - package_dir={"": "bindings/python"}, - packages=setuptools.find_packages("bindings/python"), - install_requires=_parse_requirements("bindings/python/requirements.txt"), - cmdclass=dict(build_ext=BuildBazelExtension), - ext_modules=[ - BazelExtension( - "google_benchmark._benchmark", - "//bindings/python/google_benchmark:_benchmark", - ) - ], - zip_safe=False, - # PyPI package information. - classifiers=[ - "Development Status :: 4 - Beta", - "Intended Audience :: Developers", - "Intended Audience :: Science/Research", - "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Topic :: Software Development :: Testing", - "Topic :: System :: Benchmark", - ], - license="Apache 2.0", - keywords="benchmark", -) diff --git a/benchmarks/thirdparty/benchmark/src/CMakeLists.txt b/benchmarks/thirdparty/benchmark/src/CMakeLists.txt deleted file mode 100755 index 35d559eeae..0000000000 --- a/benchmarks/thirdparty/benchmark/src/CMakeLists.txt +++ /dev/null @@ -1,114 +0,0 @@ -# Allow the source files to find headers in src/ -include(GNUInstallDirs) -include_directories(${PROJECT_SOURCE_DIR}/src) - -if (DEFINED BENCHMARK_CXX_LINKER_FLAGS) - list(APPEND CMAKE_SHARED_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) - list(APPEND CMAKE_MODULE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) -endif() - -file(GLOB - SOURCE_FILES - *.cc - ${PROJECT_SOURCE_DIR}/include/benchmark/*.h - ${CMAKE_CURRENT_SOURCE_DIR}/*.h) -file(GLOB BENCHMARK_MAIN "benchmark_main.cc") -foreach(item ${BENCHMARK_MAIN}) - list(REMOVE_ITEM SOURCE_FILES "${item}") -endforeach() - -add_library(benchmark ${SOURCE_FILES}) -add_library(benchmark::benchmark ALIAS benchmark) -set_target_properties(benchmark PROPERTIES - OUTPUT_NAME "benchmark" - VERSION ${GENERIC_LIB_VERSION} - SOVERSION ${GENERIC_LIB_SOVERSION} -) -target_include_directories(benchmark PUBLIC - $ - ) - -# Link threads. -target_link_libraries(benchmark ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) -find_library(LIBRT rt) -if(LIBRT) - target_link_libraries(benchmark ${LIBRT}) -endif() - -if(CMAKE_BUILD_TYPE) - string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UPPER) -endif() -if(NOT CMAKE_THREAD_LIBS_INIT AND "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}" MATCHES ".*-fsanitize=[^ ]*address.*") - message(WARNING "CMake's FindThreads.cmake did not fail, but CMAKE_THREAD_LIBS_INIT ended up being empty. 
This was fixed in https://github.com/Kitware/CMake/commit/d53317130e84898c5328c237186dbd995aaf1c12 Let's guess that -pthread is sufficient.") - target_link_libraries(benchmark -pthread) -endif() - -# We need extra libraries on Windows -if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") - target_link_libraries(benchmark shlwapi) -endif() - -# We need extra libraries on Solaris -if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS") - target_link_libraries(benchmark kstat) -endif() - -# Benchmark main library -add_library(benchmark_main "benchmark_main.cc") -add_library(benchmark::benchmark_main ALIAS benchmark_main) -set_target_properties(benchmark_main PROPERTIES - OUTPUT_NAME "benchmark_main" - VERSION ${GENERIC_LIB_VERSION} - SOVERSION ${GENERIC_LIB_SOVERSION} -) -target_include_directories(benchmark PUBLIC - $ - ) -target_link_libraries(benchmark_main benchmark::benchmark) - - -set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated") - -set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake") -set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake") -set(pkg_config "${generated_dir}/${PROJECT_NAME}.pc") -set(targets_export_name "${PROJECT_NAME}Targets") - -set(namespace "${PROJECT_NAME}::") - -include(CMakePackageConfigHelpers) -write_basic_package_version_file( - "${version_config}" VERSION ${GENERIC_LIB_VERSION} COMPATIBILITY SameMajorVersion -) - -configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY) -configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark.pc.in" "${pkg_config}" @ONLY) - -if (BENCHMARK_ENABLE_INSTALL) - # Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable) - install( - TARGETS benchmark benchmark_main - EXPORT ${targets_export_name} - ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} - LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} - RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} - INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) - - install( - DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark" - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} - FILES_MATCHING PATTERN "*.*h") - - install( - FILES "${project_config}" "${version_config}" - DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}") - - install( - FILES "${pkg_config}" - DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig") - - install( - EXPORT "${targets_export_name}" - NAMESPACE "${namespace}" - DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}") -endif() diff --git a/benchmarks/thirdparty/benchmark/src/arraysize.h b/benchmarks/thirdparty/benchmark/src/arraysize.h deleted file mode 100755 index 51a50f2dff..0000000000 --- a/benchmarks/thirdparty/benchmark/src/arraysize.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef BENCHMARK_ARRAYSIZE_H_ -#define BENCHMARK_ARRAYSIZE_H_ - -#include "internal_macros.h" - -namespace benchmark { -namespace internal { -// The arraysize(arr) macro returns the # of elements in an array arr. -// The expression is a compile-time constant, and therefore can be -// used in defining new arrays, for example. If you use arraysize on -// a pointer by mistake, you will get a compile-time error. -// - -// This template function declaration is used in defining arraysize. -// Note that the function doesn't need an implementation, as we only -// use its type. -template -char (&ArraySizeHelper(T (&array)[N]))[N]; - -// That gcc wants both of these prototypes seems mysterious. VC, for -// its part, can't decide which to use (another mystery). Matching of -// template overloads: the final frontier. 
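Editor's note: the point of the ArraySizeHelper declaration above (the arraysize macro itself follows immediately below) is that sizeof applied to the returned reference-to-char[N] yields the element count at compile time, while a pointer argument simply fails to compile. A standalone illustration of the same shape:

#include <cstddef>

// Same trick as the helper above: declared but never defined; only its type matters.
template <typename T, std::size_t N>
char (&ArraySizeHelper(T (&array)[N]))[N];

#define arraysize(array) (sizeof(ArraySizeHelper(array)))

int main() {
  int xs[7] = {0};
  static_assert(arraysize(xs) == 7, "compile-time element count");
  // int* p = xs;
  // arraysize(p);  // would not compile: no array reference to bind to
  return 0;
}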
-#ifndef COMPILER_MSVC -template -char (&ArraySizeHelper(const T (&array)[N]))[N]; -#endif - -#define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array))) - -} // end namespace internal -} // end namespace benchmark - -#endif // BENCHMARK_ARRAYSIZE_H_ diff --git a/benchmarks/thirdparty/benchmark/src/benchmark.cc b/benchmarks/thirdparty/benchmark/src/benchmark.cc deleted file mode 100755 index 1c049f2884..0000000000 --- a/benchmarks/thirdparty/benchmark/src/benchmark.cc +++ /dev/null @@ -1,499 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "benchmark/benchmark.h" -#include "benchmark_api_internal.h" -#include "benchmark_runner.h" -#include "internal_macros.h" - -#ifndef BENCHMARK_OS_WINDOWS -#ifndef BENCHMARK_OS_FUCHSIA -#include -#endif -#include -#include -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "check.h" -#include "colorprint.h" -#include "commandlineflags.h" -#include "complexity.h" -#include "counter.h" -#include "internal_macros.h" -#include "log.h" -#include "mutex.h" -#include "re.h" -#include "statistics.h" -#include "string_util.h" -#include "thread_manager.h" -#include "thread_timer.h" - -// Print a list of benchmarks. This option overrides all other options. -DEFINE_bool(benchmark_list_tests, false); - -// A regular expression that specifies the set of benchmarks to execute. If -// this flag is empty, or if this flag is the string \"all\", all benchmarks -// linked into the binary are run. -DEFINE_string(benchmark_filter, "."); - -// Minimum number of seconds we should run benchmark before results are -// considered significant. For cpu-time based tests, this is the lower bound -// on the total cpu time used by all threads that make up the test. For -// real-time based tests, this is the lower bound on the elapsed time of the -// benchmark execution, regardless of number of threads. -DEFINE_double(benchmark_min_time, 0.5); - -// The number of runs of each benchmark. If greater than 1, the mean and -// standard deviation of the runs will be reported. -DEFINE_int32(benchmark_repetitions, 1); - -// Report the result of each benchmark repetitions. When 'true' is specified -// only the mean, standard deviation, and other statistics are reported for -// repeated benchmarks. Affects all reporters. -DEFINE_bool(benchmark_report_aggregates_only, false); - -// Display the result of each benchmark repetitions. When 'true' is specified -// only the mean, standard deviation, and other statistics are displayed for -// repeated benchmarks. Unlike benchmark_report_aggregates_only, only affects -// the display reporter, but *NOT* file reporter, which will still contain -// all the output. -DEFINE_bool(benchmark_display_aggregates_only, false); - -// The format to use for console output. -// Valid values are 'console', 'json', or 'csv'. 
-DEFINE_string(benchmark_format, "console"); - -// The format to use for file output. -// Valid values are 'console', 'json', or 'csv'. -DEFINE_string(benchmark_out_format, "json"); - -// The file to write additional output to. -DEFINE_string(benchmark_out, ""); - -// Whether to use colors in the output. Valid values: -// 'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use colors if -// the output is being sent to a terminal and the TERM environment variable is -// set to a terminal type that supports colors. -DEFINE_string(benchmark_color, "auto"); - -// Whether to use tabular format when printing user counters to the console. -// Valid values: 'true'/'yes'/1, 'false'/'no'/0. Defaults to false. -DEFINE_bool(benchmark_counters_tabular, false); - -// The level of verbose logging to output -DEFINE_int32(v, 0); - -namespace benchmark { - -namespace internal { - -// FIXME: wouldn't LTO mess this up? -void UseCharPointer(char const volatile*) {} - -} // namespace internal - -State::State(IterationCount max_iters, const std::vector& ranges, - int thread_i, int n_threads, internal::ThreadTimer* timer, - internal::ThreadManager* manager) - : total_iterations_(0), - batch_leftover_(0), - max_iterations(max_iters), - started_(false), - finished_(false), - error_occurred_(false), - range_(ranges), - complexity_n_(0), - counters(), - thread_index(thread_i), - threads(n_threads), - timer_(timer), - manager_(manager) { - CHECK(max_iterations != 0) << "At least one iteration must be run"; - CHECK_LT(thread_index, threads) << "thread_index must be less than threads"; - - // Note: The use of offsetof below is technically undefined until C++17 - // because State is not a standard layout type. However, all compilers - // currently provide well-defined behavior as an extension (which is - // demonstrated since constexpr evaluation must diagnose all undefined - // behavior). However, GCC and Clang also warn about this use of offsetof, - // which must be suppressed. -#if defined(__INTEL_COMPILER) -#pragma warning push -#pragma warning(disable : 1875) -#elif defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#endif - // Offset tests to ensure commonly accessed data is on the first cache line. - const int cache_line_size = 64; - static_assert(offsetof(State, error_occurred_) <= - (cache_line_size - sizeof(error_occurred_)), - ""); -#if defined(__INTEL_COMPILER) -#pragma warning pop -#elif defined(__GNUC__) -#pragma GCC diagnostic pop -#endif -} - -void State::PauseTiming() { - // Add in time accumulated so far - CHECK(started_ && !finished_ && !error_occurred_); - timer_->StopTimer(); -} - -void State::ResumeTiming() { - CHECK(started_ && !finished_ && !error_occurred_); - timer_->StartTimer(); -} - -void State::SkipWithError(const char* msg) { - CHECK(msg); - error_occurred_ = true; - { - MutexLock l(manager_->GetBenchmarkMutex()); - if (manager_->results.has_error_ == false) { - manager_->results.error_message_ = msg; - manager_->results.has_error_ = true; - } - } - total_iterations_ = 0; - if (timer_->running()) timer_->StopTimer(); -} - -void State::SetIterationTime(double seconds) { - timer_->SetIterationTime(seconds); -} - -void State::SetLabel(const char* label) { - MutexLock l(manager_->GetBenchmarkMutex()); - manager_->results.report_label_ = label; -} - -void State::StartKeepRunning() { - CHECK(!started_ && !finished_); - started_ = true; - total_iterations_ = error_occurred_ ? 
0 : max_iterations; - manager_->StartStopBarrier(); - if (!error_occurred_) ResumeTiming(); -} - -void State::FinishKeepRunning() { - CHECK(started_ && (!finished_ || error_occurred_)); - if (!error_occurred_) { - PauseTiming(); - } - // Total iterations has now wrapped around past 0. Fix this. - total_iterations_ = 0; - finished_ = true; - manager_->StartStopBarrier(); -} - -namespace internal { -namespace { - -void RunBenchmarks(const std::vector& benchmarks, - BenchmarkReporter* display_reporter, - BenchmarkReporter* file_reporter) { - // Note the file_reporter can be null. - CHECK(display_reporter != nullptr); - - // Determine the width of the name field using a minimum width of 10. - bool might_have_aggregates = FLAGS_benchmark_repetitions > 1; - size_t name_field_width = 10; - size_t stat_field_width = 0; - for (const BenchmarkInstance& benchmark : benchmarks) { - name_field_width = - std::max(name_field_width, benchmark.name.str().size()); - might_have_aggregates |= benchmark.repetitions > 1; - - for (const auto& Stat : *benchmark.statistics) - stat_field_width = std::max(stat_field_width, Stat.name_.size()); - } - if (might_have_aggregates) name_field_width += 1 + stat_field_width; - - // Print header here - BenchmarkReporter::Context context; - context.name_field_width = name_field_width; - - // Keep track of running times of all instances of current benchmark - std::vector complexity_reports; - - // We flush streams after invoking reporter methods that write to them. This - // ensures users get timely updates even when streams are not line-buffered. - auto flushStreams = [](BenchmarkReporter* reporter) { - if (!reporter) return; - std::flush(reporter->GetOutputStream()); - std::flush(reporter->GetErrorStream()); - }; - - if (display_reporter->ReportContext(context) && - (!file_reporter || file_reporter->ReportContext(context))) { - flushStreams(display_reporter); - flushStreams(file_reporter); - - for (const auto& benchmark : benchmarks) { - RunResults run_results = RunBenchmark(benchmark, &complexity_reports); - - auto report = [&run_results](BenchmarkReporter* reporter, - bool report_aggregates_only) { - assert(reporter); - // If there are no aggregates, do output non-aggregates. 
- report_aggregates_only &= !run_results.aggregates_only.empty(); - if (!report_aggregates_only) - reporter->ReportRuns(run_results.non_aggregates); - if (!run_results.aggregates_only.empty()) - reporter->ReportRuns(run_results.aggregates_only); - }; - - report(display_reporter, run_results.display_report_aggregates_only); - if (file_reporter) - report(file_reporter, run_results.file_report_aggregates_only); - - flushStreams(display_reporter); - flushStreams(file_reporter); - } - } - display_reporter->Finalize(); - if (file_reporter) file_reporter->Finalize(); - flushStreams(display_reporter); - flushStreams(file_reporter); -} - -// Disable deprecated warnings temporarily because we need to reference -// CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" -#endif - -std::unique_ptr CreateReporter( - std::string const& name, ConsoleReporter::OutputOptions output_opts) { - typedef std::unique_ptr PtrType; - if (name == "console") { - return PtrType(new ConsoleReporter(output_opts)); - } else if (name == "json") { - return PtrType(new JSONReporter); - } else if (name == "csv") { - return PtrType(new CSVReporter); - } else { - std::cerr << "Unexpected format: '" << name << "'\n"; - std::exit(1); - } -} - -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif - -} // end namespace - -bool IsZero(double n) { - return std::abs(n) < std::numeric_limits::epsilon(); -} - -ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) { - int output_opts = ConsoleReporter::OO_Defaults; - auto is_benchmark_color = [force_no_color]() -> bool { - if (force_no_color) { - return false; - } - if (FLAGS_benchmark_color == "auto") { - return IsColorTerminal(); - } - return IsTruthyFlagValue(FLAGS_benchmark_color); - }; - if (is_benchmark_color()) { - output_opts |= ConsoleReporter::OO_Color; - } else { - output_opts &= ~ConsoleReporter::OO_Color; - } - if (FLAGS_benchmark_counters_tabular) { - output_opts |= ConsoleReporter::OO_Tabular; - } else { - output_opts &= ~ConsoleReporter::OO_Tabular; - } - return static_cast(output_opts); -} - -} // end namespace internal - -size_t RunSpecifiedBenchmarks() { - return RunSpecifiedBenchmarks(nullptr, nullptr); -} - -size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter) { - return RunSpecifiedBenchmarks(display_reporter, nullptr); -} - -size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, - BenchmarkReporter* file_reporter) { - std::string spec = FLAGS_benchmark_filter; - if (spec.empty() || spec == "all") - spec = "."; // Regexp that matches all benchmarks - - // Setup the reporters - std::ofstream output_file; - std::unique_ptr default_display_reporter; - std::unique_ptr default_file_reporter; - if (!display_reporter) { - default_display_reporter = internal::CreateReporter( - FLAGS_benchmark_format, internal::GetOutputOptions()); - display_reporter = default_display_reporter.get(); - } - auto& Out = display_reporter->GetOutputStream(); - auto& Err = display_reporter->GetErrorStream(); - - std::string const& fname = FLAGS_benchmark_out; - if (fname.empty() && file_reporter) { - Err << "A custom file reporter was provided but " - "--benchmark_out= was not specified." 
- << std::endl; - std::exit(1); - } - if (!fname.empty()) { - output_file.open(fname); - if (!output_file.is_open()) { - Err << "invalid file name: '" << fname << std::endl; - std::exit(1); - } - if (!file_reporter) { - default_file_reporter = internal::CreateReporter( - FLAGS_benchmark_out_format, ConsoleReporter::OO_None); - file_reporter = default_file_reporter.get(); - } - file_reporter->SetOutputStream(&output_file); - file_reporter->SetErrorStream(&output_file); - } - - std::vector benchmarks; - if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0; - - if (benchmarks.empty()) { - Err << "Failed to match any benchmarks against regex: " << spec << "\n"; - return 0; - } - - if (FLAGS_benchmark_list_tests) { - for (auto const& benchmark : benchmarks) - Out << benchmark.name.str() << "\n"; - } else { - internal::RunBenchmarks(benchmarks, display_reporter, file_reporter); - } - - return benchmarks.size(); -} - -void RegisterMemoryManager(MemoryManager* manager) { - internal::memory_manager = manager; -} - -namespace internal { - -void PrintUsageAndExit() { - fprintf(stdout, - "benchmark" - " [--benchmark_list_tests={true|false}]\n" - " [--benchmark_filter=]\n" - " [--benchmark_min_time=]\n" - " [--benchmark_repetitions=]\n" - " [--benchmark_report_aggregates_only={true|false}]\n" - " [--benchmark_display_aggregates_only={true|false}]\n" - " [--benchmark_format=]\n" - " [--benchmark_out=]\n" - " [--benchmark_out_format=]\n" - " [--benchmark_color={auto|true|false}]\n" - " [--benchmark_counters_tabular={true|false}]\n" - " [--v=]\n"); - exit(0); -} - -void ParseCommandLineFlags(int* argc, char** argv) { - using namespace benchmark; - BenchmarkReporter::Context::executable_name = - (argc && *argc > 0) ? argv[0] : "unknown"; - for (int i = 1; argc && i < *argc; ++i) { - if (ParseBoolFlag(argv[i], "benchmark_list_tests", - &FLAGS_benchmark_list_tests) || - ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) || - ParseDoubleFlag(argv[i], "benchmark_min_time", - &FLAGS_benchmark_min_time) || - ParseInt32Flag(argv[i], "benchmark_repetitions", - &FLAGS_benchmark_repetitions) || - ParseBoolFlag(argv[i], "benchmark_report_aggregates_only", - &FLAGS_benchmark_report_aggregates_only) || - ParseBoolFlag(argv[i], "benchmark_display_aggregates_only", - &FLAGS_benchmark_display_aggregates_only) || - ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) || - ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) || - ParseStringFlag(argv[i], "benchmark_out_format", - &FLAGS_benchmark_out_format) || - ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) || - // "color_print" is the deprecated name for "benchmark_color". - // TODO: Remove this. 
- ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) || - ParseBoolFlag(argv[i], "benchmark_counters_tabular", - &FLAGS_benchmark_counters_tabular) || - ParseInt32Flag(argv[i], "v", &FLAGS_v)) { - for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1]; - - --(*argc); - --i; - } else if (IsFlag(argv[i], "help")) { - PrintUsageAndExit(); - } - } - for (auto const* flag : - {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format}) - if (*flag != "console" && *flag != "json" && *flag != "csv") { - PrintUsageAndExit(); - } - if (FLAGS_benchmark_color.empty()) { - PrintUsageAndExit(); - } -} - -int InitializeStreams() { - static std::ios_base::Init init; - return 0; -} - -} // end namespace internal - -void Initialize(int* argc, char** argv) { - internal::ParseCommandLineFlags(argc, argv); - internal::LogLevel() = FLAGS_v; -} - -bool ReportUnrecognizedArguments(int argc, char** argv) { - for (int i = 1; i < argc; ++i) { - fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], - argv[i]); - } - return argc > 1; -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_api_internal.cc b/benchmarks/thirdparty/benchmark/src/benchmark_api_internal.cc deleted file mode 100755 index d468a257e3..0000000000 --- a/benchmarks/thirdparty/benchmark/src/benchmark_api_internal.cc +++ /dev/null @@ -1,15 +0,0 @@ -#include "benchmark_api_internal.h" - -namespace benchmark { -namespace internal { - -State BenchmarkInstance::Run(IterationCount iters, int thread_id, - internal::ThreadTimer* timer, - internal::ThreadManager* manager) const { - State st(iters, arg, thread_id, threads, timer, manager); - benchmark->Run(st); - return st; -} - -} // internal -} // benchmark diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_api_internal.h b/benchmarks/thirdparty/benchmark/src/benchmark_api_internal.h deleted file mode 100755 index 264eff95c5..0000000000 --- a/benchmarks/thirdparty/benchmark/src/benchmark_api_internal.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef BENCHMARK_API_INTERNAL_H -#define BENCHMARK_API_INTERNAL_H - -#include "benchmark/benchmark.h" -#include "commandlineflags.h" - -#include -#include -#include -#include -#include -#include - -namespace benchmark { -namespace internal { - -// Information kept per benchmark we may want to run -struct BenchmarkInstance { - BenchmarkName name; - Benchmark* benchmark; - AggregationReportMode aggregation_report_mode; - std::vector arg; - TimeUnit time_unit; - int range_multiplier; - bool measure_process_cpu_time; - bool use_real_time; - bool use_manual_time; - BigO complexity; - BigOFunc* complexity_lambda; - UserCounters counters; - const std::vector* statistics; - bool last_benchmark_instance; - int repetitions; - double min_time; - IterationCount iterations; - int threads; // Number of concurrent threads to us - - State Run(IterationCount iters, int thread_id, internal::ThreadTimer* timer, - internal::ThreadManager* manager) const; -}; - -bool FindBenchmarksInternal(const std::string& re, - std::vector* benchmarks, - std::ostream* Err); - -bool IsZero(double n); - -ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false); - -} // end namespace internal -} // end namespace benchmark - -#endif // BENCHMARK_API_INTERNAL_H diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_main.cc b/benchmarks/thirdparty/benchmark/src/benchmark_main.cc deleted file mode 100755 index b3b2478314..0000000000 --- a/benchmarks/thirdparty/benchmark/src/benchmark_main.cc +++ /dev/null @@ 
-1,17 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "benchmark/benchmark.h" - -BENCHMARK_MAIN(); diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_name.cc b/benchmarks/thirdparty/benchmark/src/benchmark_name.cc deleted file mode 100755 index 2a17ebce27..0000000000 --- a/benchmarks/thirdparty/benchmark/src/benchmark_name.cc +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include - -namespace benchmark { - -namespace { - -// Compute the total size of a pack of std::strings -size_t size_impl() { return 0; } - -template -size_t size_impl(const Head& head, const Tail&... tail) { - return head.size() + size_impl(tail...); -} - -// Join a pack of std::strings using a delimiter -// TODO: use absl::StrJoin -void join_impl(std::string&, char) {} - -template -void join_impl(std::string& s, const char delimiter, const Head& head, - const Tail&... tail) { - if (!s.empty() && !head.empty()) { - s += delimiter; - } - - s += head; - - join_impl(s, delimiter, tail...); -} - -template -std::string join(char delimiter, const Ts&... ts) { - std::string s; - s.reserve(sizeof...(Ts) + size_impl(ts...)); - join_impl(s, delimiter, ts...); - return s; -} -} // namespace - -std::string BenchmarkName::str() const { - return join('/', function_name, args, min_time, iterations, repetitions, - time_type, threads); -} -} // namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_register.cc b/benchmarks/thirdparty/benchmark/src/benchmark_register.cc deleted file mode 100755 index 65d9944f4f..0000000000 --- a/benchmarks/thirdparty/benchmark/src/benchmark_register.cc +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
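Editor's note: the join helpers in benchmark_name.cc above insert the '/' delimiter only between non-empty components, which is why generated names such as "BM_foo/8/threads:2" never contain empty segments. A standalone sketch of the same semantics, using a C++17 fold where the original uses C++11 recursion:

#include <iostream>
#include <string>

// Mirrors join_impl above: add the delimiter only when both the accumulated
// string and the next component are non-empty.
static void join_one(std::string& s, char delimiter, const std::string& part) {
  if (!s.empty() && !part.empty()) s += delimiter;
  s += part;
}

template <typename... Ts>
static std::string join(char delimiter, const Ts&... parts) {
  std::string s;
  (join_one(s, delimiter, parts), ...);
  return s;
}

int main() {
  // Empty fields (e.g. no min_time, no repetitions) simply vanish.
  std::cout << join('/', std::string("BM_foo"), std::string("8"),
                    std::string(""), std::string("threads:2"))
            << "\n";  // prints BM_foo/8/threads:2
}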
- -#include "benchmark_register.h" - -#ifndef BENCHMARK_OS_WINDOWS -#ifndef BENCHMARK_OS_FUCHSIA -#include -#endif -#include -#include -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef __STDC_FORMAT_MACROS -#define __STDC_FORMAT_MACROS -#endif -#include - -#include "benchmark/benchmark.h" -#include "benchmark_api_internal.h" -#include "check.h" -#include "commandlineflags.h" -#include "complexity.h" -#include "internal_macros.h" -#include "log.h" -#include "mutex.h" -#include "re.h" -#include "statistics.h" -#include "string_util.h" -#include "timers.h" - -namespace benchmark { - -namespace { -// For non-dense Range, intermediate values are powers of kRangeMultiplier. -static const int kRangeMultiplier = 8; -// The size of a benchmark family determines is the number of inputs to repeat -// the benchmark on. If this is "large" then warn the user during configuration. -static const size_t kMaxFamilySize = 100; -} // end namespace - -namespace internal { - -//=============================================================================// -// BenchmarkFamilies -//=============================================================================// - -// Class for managing registered benchmarks. Note that each registered -// benchmark identifies a family of related benchmarks to run. -class BenchmarkFamilies { - public: - static BenchmarkFamilies* GetInstance(); - - // Registers a benchmark family and returns the index assigned to it. - size_t AddBenchmark(std::unique_ptr family); - - // Clear all registered benchmark families. - void ClearBenchmarks(); - - // Extract the list of benchmark instances that match the specified - // regular expression. - bool FindBenchmarks(std::string re, - std::vector* benchmarks, - std::ostream* Err); - - private: - BenchmarkFamilies() {} - - std::vector> families_; - Mutex mutex_; -}; - -BenchmarkFamilies* BenchmarkFamilies::GetInstance() { - static BenchmarkFamilies instance; - return &instance; -} - -size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr family) { - MutexLock l(mutex_); - size_t index = families_.size(); - families_.push_back(std::move(family)); - return index; -} - -void BenchmarkFamilies::ClearBenchmarks() { - MutexLock l(mutex_); - families_.clear(); - families_.shrink_to_fit(); -} - -bool BenchmarkFamilies::FindBenchmarks( - std::string spec, std::vector* benchmarks, - std::ostream* ErrStream) { - CHECK(ErrStream); - auto& Err = *ErrStream; - // Make regular expression out of command-line flag - std::string error_msg; - Regex re; - bool isNegativeFilter = false; - if (spec[0] == '-') { - spec.replace(0, 1, ""); - isNegativeFilter = true; - } - if (!re.Init(spec, &error_msg)) { - Err << "Could not compile benchmark re: " << error_msg << std::endl; - return false; - } - - // Special list of thread counts to use when none are specified - const std::vector one_thread = {1}; - - MutexLock l(mutex_); - for (std::unique_ptr& family : families_) { - // Family was deleted or benchmark doesn't match - if (!family) continue; - - if (family->ArgsCnt() == -1) { - family->Args({}); - } - const std::vector* thread_counts = - (family->thread_counts_.empty() - ? &one_thread - : &static_cast&>(family->thread_counts_)); - const size_t family_size = family->args_.size() * thread_counts->size(); - // The benchmark will be run at least 'family_size' different inputs. - // If 'family_size' is very large warn the user. 
- if (family_size > kMaxFamilySize) { - Err << "The number of inputs is very large. " << family->name_ - << " will be repeated at least " << family_size << " times.\n"; - } - // reserve in the special case the regex ".", since we know the final - // family size. - if (spec == ".") benchmarks->reserve(family_size); - - for (auto const& args : family->args_) { - for (int num_threads : *thread_counts) { - BenchmarkInstance instance; - instance.name.function_name = family->name_; - instance.benchmark = family.get(); - instance.aggregation_report_mode = family->aggregation_report_mode_; - instance.arg = args; - instance.time_unit = family->time_unit_; - instance.range_multiplier = family->range_multiplier_; - instance.min_time = family->min_time_; - instance.iterations = family->iterations_; - instance.repetitions = family->repetitions_; - instance.measure_process_cpu_time = family->measure_process_cpu_time_; - instance.use_real_time = family->use_real_time_; - instance.use_manual_time = family->use_manual_time_; - instance.complexity = family->complexity_; - instance.complexity_lambda = family->complexity_lambda_; - instance.statistics = &family->statistics_; - instance.threads = num_threads; - - // Add arguments to instance name - size_t arg_i = 0; - for (auto const& arg : args) { - if (!instance.name.args.empty()) { - instance.name.args += '/'; - } - - if (arg_i < family->arg_names_.size()) { - const auto& arg_name = family->arg_names_[arg_i]; - if (!arg_name.empty()) { - instance.name.args += StrFormat("%s:", arg_name.c_str()); - } - } - - instance.name.args += StrFormat("%" PRId64, arg); - ++arg_i; - } - - if (!IsZero(family->min_time_)) - instance.name.min_time = - StrFormat("min_time:%0.3f", family->min_time_); - if (family->iterations_ != 0) { - instance.name.iterations = - StrFormat("iterations:%lu", - static_cast(family->iterations_)); - } - if (family->repetitions_ != 0) - instance.name.repetitions = - StrFormat("repeats:%d", family->repetitions_); - - if (family->measure_process_cpu_time_) { - instance.name.time_type = "process_time"; - } - - if (family->use_manual_time_) { - if (!instance.name.time_type.empty()) { - instance.name.time_type += '/'; - } - instance.name.time_type += "manual_time"; - } else if (family->use_real_time_) { - if (!instance.name.time_type.empty()) { - instance.name.time_type += '/'; - } - instance.name.time_type += "real_time"; - } - - // Add the number of threads used to the name - if (!family->thread_counts_.empty()) { - instance.name.threads = StrFormat("threads:%d", instance.threads); - } - - const auto full_name = instance.name.str(); - if ((re.Match(full_name) && !isNegativeFilter) || - (!re.Match(full_name) && isNegativeFilter)) { - instance.last_benchmark_instance = (&args == &family->args_.back()); - benchmarks->push_back(std::move(instance)); - } - } - } - } - return true; -} - -Benchmark* RegisterBenchmarkInternal(Benchmark* bench) { - std::unique_ptr bench_ptr(bench); - BenchmarkFamilies* families = BenchmarkFamilies::GetInstance(); - families->AddBenchmark(std::move(bench_ptr)); - return bench; -} - -// FIXME: This function is a hack so that benchmark.cc can access -// `BenchmarkFamilies` -bool FindBenchmarksInternal(const std::string& re, - std::vector* benchmarks, - std::ostream* Err) { - return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err); -} - -//=============================================================================// -// Benchmark 
-//=============================================================================// - -Benchmark::Benchmark(const char* name) - : name_(name), - aggregation_report_mode_(ARM_Unspecified), - time_unit_(kNanosecond), - range_multiplier_(kRangeMultiplier), - min_time_(0), - iterations_(0), - repetitions_(0), - measure_process_cpu_time_(false), - use_real_time_(false), - use_manual_time_(false), - complexity_(oNone), - complexity_lambda_(nullptr) { - ComputeStatistics("mean", StatisticsMean); - ComputeStatistics("median", StatisticsMedian); - ComputeStatistics("stddev", StatisticsStdDev); -} - -Benchmark::~Benchmark() {} - -Benchmark* Benchmark::Arg(int64_t x) { - CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); - args_.push_back({x}); - return this; -} - -Benchmark* Benchmark::Unit(TimeUnit unit) { - time_unit_ = unit; - return this; -} - -Benchmark* Benchmark::Range(int64_t start, int64_t limit) { - CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); - std::vector arglist; - AddRange(&arglist, start, limit, range_multiplier_); - - for (int64_t i : arglist) { - args_.push_back({i}); - } - return this; -} - -Benchmark* Benchmark::Ranges( - const std::vector>& ranges) { - CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast(ranges.size())); - std::vector> arglists(ranges.size()); - for (std::size_t i = 0; i < ranges.size(); i++) { - AddRange(&arglists[i], ranges[i].first, ranges[i].second, - range_multiplier_); - } - - ArgsProduct(arglists); - - return this; -} - -Benchmark* Benchmark::ArgsProduct( - const std::vector>& arglists) { - CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast(arglists.size())); - - std::vector indices(arglists.size()); - const std::size_t total = std::accumulate( - std::begin(arglists), std::end(arglists), std::size_t{1}, - [](const std::size_t res, const std::vector& arglist) { - return res * arglist.size(); - }); - std::vector args; - args.reserve(arglists.size()); - for (std::size_t i = 0; i < total; i++) { - for (std::size_t arg = 0; arg < arglists.size(); arg++) { - args.push_back(arglists[arg][indices[arg]]); - } - args_.push_back(args); - args.clear(); - - std::size_t arg = 0; - do { - indices[arg] = (indices[arg] + 1) % arglists[arg].size(); - } while (indices[arg++] == 0 && arg < arglists.size()); - } - - return this; -} - -Benchmark* Benchmark::ArgName(const std::string& name) { - CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); - arg_names_ = {name}; - return this; -} - -Benchmark* Benchmark::ArgNames(const std::vector& names) { - CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast(names.size())); - arg_names_ = names; - return this; -} - -Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) { - CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); - CHECK_LE(start, limit); - for (int64_t arg = start; arg <= limit; arg += step) { - args_.push_back({arg}); - } - return this; -} - -Benchmark* Benchmark::Args(const std::vector& args) { - CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast(args.size())); - args_.push_back(args); - return this; -} - -Benchmark* Benchmark::Apply(void (*custom_arguments)(Benchmark* benchmark)) { - custom_arguments(this); - return this; -} - -Benchmark* Benchmark::RangeMultiplier(int multiplier) { - CHECK(multiplier > 1); - range_multiplier_ = multiplier; - return this; -} - -Benchmark* Benchmark::MinTime(double t) { - CHECK(t > 0.0); - CHECK(iterations_ == 0); - min_time_ = t; - return this; -} - -Benchmark* Benchmark::Iterations(IterationCount n) { - CHECK(n > 0); - CHECK(IsZero(min_time_)); - iterations_ = n; - return this; -} - -Benchmark* 
Benchmark::Repetitions(int n) { - CHECK(n > 0); - repetitions_ = n; - return this; -} - -Benchmark* Benchmark::ReportAggregatesOnly(bool value) { - aggregation_report_mode_ = value ? ARM_ReportAggregatesOnly : ARM_Default; - return this; -} - -Benchmark* Benchmark::DisplayAggregatesOnly(bool value) { - // If we were called, the report mode is no longer 'unspecified', in any case. - aggregation_report_mode_ = static_cast( - aggregation_report_mode_ | ARM_Default); - - if (value) { - aggregation_report_mode_ = static_cast( - aggregation_report_mode_ | ARM_DisplayReportAggregatesOnly); - } else { - aggregation_report_mode_ = static_cast( - aggregation_report_mode_ & ~ARM_DisplayReportAggregatesOnly); - } - - return this; -} - -Benchmark* Benchmark::MeasureProcessCPUTime() { - // Can be used together with UseRealTime() / UseManualTime(). - measure_process_cpu_time_ = true; - return this; -} - -Benchmark* Benchmark::UseRealTime() { - CHECK(!use_manual_time_) - << "Cannot set UseRealTime and UseManualTime simultaneously."; - use_real_time_ = true; - return this; -} - -Benchmark* Benchmark::UseManualTime() { - CHECK(!use_real_time_) - << "Cannot set UseRealTime and UseManualTime simultaneously."; - use_manual_time_ = true; - return this; -} - -Benchmark* Benchmark::Complexity(BigO complexity) { - complexity_ = complexity; - return this; -} - -Benchmark* Benchmark::Complexity(BigOFunc* complexity) { - complexity_lambda_ = complexity; - complexity_ = oLambda; - return this; -} - -Benchmark* Benchmark::ComputeStatistics(std::string name, - StatisticsFunc* statistics) { - statistics_.emplace_back(name, statistics); - return this; -} - -Benchmark* Benchmark::Threads(int t) { - CHECK_GT(t, 0); - thread_counts_.push_back(t); - return this; -} - -Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) { - CHECK_GT(min_threads, 0); - CHECK_GE(max_threads, min_threads); - - AddRange(&thread_counts_, min_threads, max_threads, 2); - return this; -} - -Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads, - int stride) { - CHECK_GT(min_threads, 0); - CHECK_GE(max_threads, min_threads); - CHECK_GE(stride, 1); - - for (auto i = min_threads; i < max_threads; i += stride) { - thread_counts_.push_back(i); - } - thread_counts_.push_back(max_threads); - return this; -} - -Benchmark* Benchmark::ThreadPerCpu() { - thread_counts_.push_back(CPUInfo::Get().num_cpus); - return this; -} - -void Benchmark::SetName(const char* name) { name_ = name; } - -int Benchmark::ArgsCnt() const { - if (args_.empty()) { - if (arg_names_.empty()) return -1; - return static_cast(arg_names_.size()); - } - return static_cast(args_.front().size()); -} - -//=============================================================================// -// FunctionBenchmark -//=============================================================================// - -void FunctionBenchmark::Run(State& st) { func_(st); } - -} // end namespace internal - -void ClearRegisteredBenchmarks() { - internal::BenchmarkFamilies::GetInstance()->ClearBenchmarks(); -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_register.h b/benchmarks/thirdparty/benchmark/src/benchmark_register.h deleted file mode 100755 index 61377d7423..0000000000 --- a/benchmarks/thirdparty/benchmark/src/benchmark_register.h +++ /dev/null @@ -1,107 +0,0 @@ -#ifndef BENCHMARK_REGISTER_H -#define BENCHMARK_REGISTER_H - -#include - -#include "check.h" - -namespace benchmark { -namespace internal { - -// Append the powers of 'mult' in the 
closed interval [lo, hi]. -// Returns iterator to the start of the inserted range. -template -typename std::vector::iterator -AddPowers(std::vector* dst, T lo, T hi, int mult) { - CHECK_GE(lo, 0); - CHECK_GE(hi, lo); - CHECK_GE(mult, 2); - - const size_t start_offset = dst->size(); - - static const T kmax = std::numeric_limits::max(); - - // Space out the values in multiples of "mult" - for (T i = 1; i <= hi; i *= mult) { - if (i >= lo) { - dst->push_back(i); - } - // Break the loop here since multiplying by - // 'mult' would move outside of the range of T - if (i > kmax / mult) break; - } - - return dst->begin() + start_offset; -} - -template -void AddNegatedPowers(std::vector* dst, T lo, T hi, int mult) { - // We negate lo and hi so we require that they cannot be equal to 'min'. - CHECK_GT(lo, std::numeric_limits::min()); - CHECK_GT(hi, std::numeric_limits::min()); - CHECK_GE(hi, lo); - CHECK_LE(hi, 0); - - // Add positive powers, then negate and reverse. - // Casts necessary since small integers get promoted - // to 'int' when negating. - const auto lo_complement = static_cast(-lo); - const auto hi_complement = static_cast(-hi); - - const auto it = AddPowers(dst, hi_complement, lo_complement, mult); - - std::for_each(it, dst->end(), [](T& t) { t *= -1; }); - std::reverse(it, dst->end()); -} - -template -void AddRange(std::vector* dst, T lo, T hi, int mult) { - static_assert(std::is_integral::value && std::is_signed::value, - "Args type must be a signed integer"); - - CHECK_GE(hi, lo); - CHECK_GE(mult, 2); - - // Add "lo" - dst->push_back(lo); - - // Handle lo == hi as a special case, so we then know - // lo < hi and so it is safe to add 1 to lo and subtract 1 - // from hi without falling outside of the range of T. - if (lo == hi) return; - - // Ensure that lo_inner <= hi_inner below. - if (lo + 1 == hi) { - dst->push_back(hi); - return; - } - - // Add all powers of 'mult' in the range [lo+1, hi-1] (inclusive). - const auto lo_inner = static_cast(lo + 1); - const auto hi_inner = static_cast(hi - 1); - - // Insert negative values - if (lo_inner < 0) { - AddNegatedPowers(dst, lo_inner, std::min(hi_inner, T{-1}), mult); - } - - // Treat 0 as a special case (see discussion on #762). - if (lo <= 0 && hi >= 0) { - dst->push_back(0); - } - - // Insert positive values - if (hi_inner > 0) { - AddPowers(dst, std::max(lo_inner, T{1}), hi_inner, mult); - } - - // Add "hi" (if different from last value). - if (hi != dst->back()) { - dst->push_back(hi); - } -} - -} // namespace internal -} // namespace benchmark - -#endif // BENCHMARK_REGISTER_H diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_runner.cc b/benchmarks/thirdparty/benchmark/src/benchmark_runner.cc deleted file mode 100755 index 7bc6b6329e..0000000000 --- a/benchmarks/thirdparty/benchmark/src/benchmark_runner.cc +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
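Before moving on to the runner, note what the AddRange() helper above actually generates: "lo" first, then every power of mult strictly between lo and hi, a 0 if the range straddles it, then "hi". A rough sketch of the positive-bounds case with illustrative values; ExpandRange is a hypothetical stand-in written for this note, not a library function, and it ignores the zero/negative handling shown above.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Minimal sketch of the Range() expansion for strictly positive bounds:
    // lo, then every power of `mult` inside (lo, hi), then hi.
    std::vector<int64_t> ExpandRange(int64_t lo, int64_t hi, int mult) {
      std::vector<int64_t> out = {lo};
      for (int64_t p = 1; p < hi; p *= mult) {
        if (p > lo) out.push_back(p);
        if (p > hi / mult) break;  // stop before `p * mult` could overflow
      }
      if (out.back() != hi) out.push_back(hi);
      return out;
    }

    int main() {
      // Matches Benchmark::Range(8, 1024) with the default multiplier of 8:
      for (int64_t v : ExpandRange(8, 1024, 8)) std::cout << v << " ";
      std::cout << "\n";  // 8 64 512 1024
      // A range straddling zero also interleaves 0, e.g. AddRange(-8, 8, 2)
      // above yields {-8, -4, -2, -1, 0, 1, 2, 4, 8}.
    }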
- -#include "benchmark_runner.h" -#include "benchmark/benchmark.h" -#include "benchmark_api_internal.h" -#include "internal_macros.h" - -#ifndef BENCHMARK_OS_WINDOWS -#ifndef BENCHMARK_OS_FUCHSIA -#include -#endif -#include -#include -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "check.h" -#include "colorprint.h" -#include "commandlineflags.h" -#include "complexity.h" -#include "counter.h" -#include "internal_macros.h" -#include "log.h" -#include "mutex.h" -#include "re.h" -#include "statistics.h" -#include "string_util.h" -#include "thread_manager.h" -#include "thread_timer.h" - -namespace benchmark { - -namespace internal { - -MemoryManager* memory_manager = nullptr; - -namespace { - -static constexpr IterationCount kMaxIterations = 1000000000; - -BenchmarkReporter::Run CreateRunReport( - const benchmark::internal::BenchmarkInstance& b, - const internal::ThreadManager::Result& results, - IterationCount memory_iterations, - const MemoryManager::Result& memory_result, double seconds, - int64_t repetition_index) { - // Create report about this benchmark run. - BenchmarkReporter::Run report; - - report.run_name = b.name; - report.error_occurred = results.has_error_; - report.error_message = results.error_message_; - report.report_label = results.report_label_; - // This is the total iterations across all threads. - report.iterations = results.iterations; - report.time_unit = b.time_unit; - report.threads = b.threads; - report.repetition_index = repetition_index; - report.repetitions = b.repetitions; - - if (!report.error_occurred) { - if (b.use_manual_time) { - report.real_accumulated_time = results.manual_time_used; - } else { - report.real_accumulated_time = results.real_time_used; - } - report.cpu_accumulated_time = results.cpu_time_used; - report.complexity_n = results.complexity_n; - report.complexity = b.complexity; - report.complexity_lambda = b.complexity_lambda; - report.statistics = b.statistics; - report.counters = results.counters; - - if (memory_iterations > 0) { - report.has_memory_result = true; - report.allocs_per_iter = - memory_iterations ? static_cast(memory_result.num_allocs) / - memory_iterations - : 0; - report.max_bytes_used = memory_result.max_bytes_used; - } - - internal::Finish(&report.counters, results.iterations, seconds, b.threads); - } - return report; -} - -// Execute one thread of benchmark b for the specified number of iterations. -// Adds the stats collected for the thread into *total. -void RunInThread(const BenchmarkInstance* b, IterationCount iters, - int thread_id, ThreadManager* manager) { - internal::ThreadTimer timer( - b->measure_process_cpu_time - ? 
internal::ThreadTimer::CreateProcessCpuTime() - : internal::ThreadTimer::Create()); - State st = b->Run(iters, thread_id, &timer, manager); - CHECK(st.error_occurred() || st.iterations() >= st.max_iterations) - << "Benchmark returned before State::KeepRunning() returned false!"; - { - MutexLock l(manager->GetBenchmarkMutex()); - internal::ThreadManager::Result& results = manager->results; - results.iterations += st.iterations(); - results.cpu_time_used += timer.cpu_time_used(); - results.real_time_used += timer.real_time_used(); - results.manual_time_used += timer.manual_time_used(); - results.complexity_n += st.complexity_length_n(); - internal::Increment(&results.counters, st.counters); - } - manager->NotifyThreadComplete(); -} - -class BenchmarkRunner { - public: - BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_, - std::vector* complexity_reports_) - : b(b_), - complexity_reports(*complexity_reports_), - min_time(!IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time), - repeats(b.repetitions != 0 ? b.repetitions - : FLAGS_benchmark_repetitions), - has_explicit_iteration_count(b.iterations != 0), - pool(b.threads - 1), - iters(has_explicit_iteration_count ? b.iterations : 1) { - run_results.display_report_aggregates_only = - (FLAGS_benchmark_report_aggregates_only || - FLAGS_benchmark_display_aggregates_only); - run_results.file_report_aggregates_only = - FLAGS_benchmark_report_aggregates_only; - if (b.aggregation_report_mode != internal::ARM_Unspecified) { - run_results.display_report_aggregates_only = - (b.aggregation_report_mode & - internal::ARM_DisplayReportAggregatesOnly); - run_results.file_report_aggregates_only = - (b.aggregation_report_mode & internal::ARM_FileReportAggregatesOnly); - } - - for (int repetition_num = 0; repetition_num < repeats; repetition_num++) { - DoOneRepetition(repetition_num); - } - - // Calculate additional statistics - run_results.aggregates_only = ComputeStats(run_results.non_aggregates); - - // Maybe calculate complexity report - if ((b.complexity != oNone) && b.last_benchmark_instance) { - auto additional_run_stats = ComputeBigO(complexity_reports); - run_results.aggregates_only.insert(run_results.aggregates_only.end(), - additional_run_stats.begin(), - additional_run_stats.end()); - complexity_reports.clear(); - } - } - - RunResults&& get_results() { return std::move(run_results); } - - private: - RunResults run_results; - - const benchmark::internal::BenchmarkInstance& b; - std::vector& complexity_reports; - - const double min_time; - const int repeats; - const bool has_explicit_iteration_count; - - std::vector pool; - - IterationCount iters; // preserved between repetitions! - // So only the first repetition has to find/calculate it, - // the other repetitions will just use that precomputed iteration count. - - struct IterationResults { - internal::ThreadManager::Result results; - IterationCount iters; - double seconds; - }; - IterationResults DoNIterations() { - VLOG(2) << "Running " << b.name.str() << " for " << iters << "\n"; - - std::unique_ptr manager; - manager.reset(new internal::ThreadManager(b.threads)); - - // Run all but one thread in separate threads - for (std::size_t ti = 0; ti < pool.size(); ++ti) { - pool[ti] = std::thread(&RunInThread, &b, iters, static_cast(ti + 1), - manager.get()); - } - // And run one thread here directly. - // (If we were asked to run just one thread, we don't create new threads.) - // Yes, we need to do this here *after* we start the separate threads. 
-    RunInThread(&b, iters, 0, manager.get());
-
-    // The main thread has finished. Now let's wait for the other threads.
-    manager->WaitForAllThreads();
-    for (std::thread& thread : pool) thread.join();
-
-    IterationResults i;
-    // Acquire the measurements/counters from the manager, UNDER THE LOCK!
-    {
-      MutexLock l(manager->GetBenchmarkMutex());
-      i.results = manager->results;
-    }
-
-    // And get rid of the manager.
-    manager.reset();
-
-    // Adjust real/manual time stats since they were reported per thread.
-    i.results.real_time_used /= b.threads;
-    i.results.manual_time_used /= b.threads;
-    // If we were measuring whole-process CPU usage, adjust the CPU time too.
-    if (b.measure_process_cpu_time) i.results.cpu_time_used /= b.threads;
-
-    VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
-            << i.results.real_time_used << "\n";
-
-    // So for how long were we running?
-    i.iters = iters;
-    // Base decisions off of real time if requested by this benchmark.
-    i.seconds = i.results.cpu_time_used;
-    if (b.use_manual_time) {
-      i.seconds = i.results.manual_time_used;
-    } else if (b.use_real_time) {
-      i.seconds = i.results.real_time_used;
-    }
-
-    return i;
-  }
-
-  IterationCount PredictNumItersNeeded(const IterationResults& i) const {
-    // See by how much the iteration count should be increased.
-    // Note: Avoid division by zero with max(seconds, 1ns).
-    double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
-    // If our last run was at least 10% of FLAGS_benchmark_min_time then we
-    // use the multiplier directly.
-    // Otherwise we use at most 10 times expansion.
-    // NOTE: When the last run was at least 10% of the min time the max
-    // expansion should be 14x.
-    bool is_significant = (i.seconds / min_time) > 0.1;
-    multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
-    if (multiplier <= 1.0) multiplier = 2.0;
-
-    // So what seems to be the sufficiently-large iteration count? Round up.
-    const IterationCount max_next_iters = static_cast<IterationCount>(
-        std::lround(std::max(multiplier * static_cast<double>(i.iters),
-                             static_cast<double>(i.iters) + 1.0)));
-    // But we do have *some* sanity limits though.
-    const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
-
-    VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
-    return next_iters;  // round up before conversion to integer.
-  }
-
-  bool ShouldReportIterationResults(const IterationResults& i) const {
-    // Determine if this run should be reported:
-    // either it has run for a sufficient amount of time,
-    // or because an error was reported.
-    return i.results.has_error_ ||
-           i.iters >= kMaxIterations ||  // Too many iterations already.
-           i.seconds >= min_time ||      // The elapsed time is large enough.
-           // CPU time is specified but the elapsed real time greatly exceeds
-           // the minimum time.
-           // Note that user-provided timers are exempt from this sanity check.
-           ((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time);
-  }
-
-  void DoOneRepetition(int64_t repetition_index) {
-    const bool is_the_first_repetition = repetition_index == 0;
-    IterationResults i;
-
-    // We *may* be gradually increasing the length (iteration count)
-    // of the benchmark until we decide the results are significant.
-    // And once we do, we report those last results and exit.
-    // Please do note that if there are repetitions, the iteration count
-    // is *only* calculated for the *first* repetition, and other repetitions
-    // simply use that precomputed iteration count.
-    for (;;) {
-      i = DoNIterations();
-
-      // Do we consider the results to be significant?
-      // If we are doing repetitions, and the first repetition was already
-      // done, it has already calculated the correct iteration count, so we
-      // have run that very iteration count just now. No need to calculate
-      // anything. Just report. Else, the normal rules apply.
-      const bool results_are_significant = !is_the_first_repetition ||
-                                           has_explicit_iteration_count ||
-                                           ShouldReportIterationResults(i);
-
-      if (results_are_significant) break;  // Good, let's report them!
-
-      // Nope, bad iteration. Let's re-estimate the hopefully-sufficient
-      // iteration count, and run the benchmark again...
-
-      iters = PredictNumItersNeeded(i);
-      assert(iters > i.iters &&
-             "if we did more iterations than we want to do the next time, "
-             "then we should have accepted the current iteration run.");
-    }
-
-    // Oh, one last thing, we need to also produce the 'memory measurements'.
-    MemoryManager::Result memory_result;
-    IterationCount memory_iterations = 0;
-    if (memory_manager != nullptr) {
-      // Only run a few iterations to reduce the impact of one-time
-      // allocations in benchmarks that are not properly managed.
-      memory_iterations = std::min<IterationCount>(16, iters);
-      memory_manager->Start();
-      std::unique_ptr<internal::ThreadManager> manager;
-      manager.reset(new internal::ThreadManager(1));
-      RunInThread(&b, memory_iterations, 0, manager.get());
-      manager->WaitForAllThreads();
-      manager.reset();
-
-      memory_manager->Stop(&memory_result);
-    }
-
-    // OK, now actually report.
-    BenchmarkReporter::Run report =
-        CreateRunReport(b, i.results, memory_iterations, memory_result,
-                        i.seconds, repetition_index);
-
-    if (!report.error_occurred && b.complexity != oNone)
-      complexity_reports.push_back(report);
-
-    run_results.non_aggregates.push_back(report);
-  }
-};
-
-}  // end namespace
-
-RunResults RunBenchmark(
-    const benchmark::internal::BenchmarkInstance& b,
-    std::vector<BenchmarkReporter::Run>* complexity_reports) {
-  internal::BenchmarkRunner r(b, complexity_reports);
-  return r.get_results();
-}
-
-}  // end namespace internal
-
-}  // end namespace benchmark
diff --git a/benchmarks/thirdparty/benchmark/src/benchmark_runner.h b/benchmarks/thirdparty/benchmark/src/benchmark_runner.h
deleted file mode 100755
index 96e8282a11..0000000000
--- a/benchmarks/thirdparty/benchmark/src/benchmark_runner.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
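The loop in DoOneRepetition() above grows the iteration count geometrically until a run is long enough to trust. A small self-contained sketch of that growth rule with made-up timings; the 0.5 s minimum and the 10 us per-iteration cost are illustrative assumptions, not library constants pinned down by this patch.

    #include <algorithm>
    #include <cmath>
    #include <iostream>

    // Sketch of the iteration-growth rule in PredictNumItersNeeded() above.
    int main() {
      const double min_time = 0.5;            // assumed minimum run time (s)
      double iters = 1000, seconds = 0.01;    // last run: 1000 iters in 10 ms
      while (seconds < min_time) {
        // Overshoot by 1.4x so the next run clears min_time in one go.
        double multiplier = min_time * 1.4 / std::max(seconds, 1e-9);
        const bool is_significant = (seconds / min_time) > 0.1;
        // Short, noisy runs are only trusted for a 10x expansion at most.
        if (!is_significant) multiplier = std::min(10.0, multiplier);
        if (multiplier <= 1.0) multiplier = 2.0;
        iters = std::lround(std::max(multiplier * iters, iters + 1.0));
        seconds = iters * 1e-5;  // pretend each iteration costs 10 us
        std::cout << iters << " iters -> " << seconds << " s\n";
      }
      // Prints: 10000 iters -> 0.1 s, then 70000 iters -> 0.7 s, and stops.
    }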
- -#ifndef BENCHMARK_RUNNER_H_ -#define BENCHMARK_RUNNER_H_ - -#include "benchmark_api_internal.h" -#include "internal_macros.h" - -DECLARE_double(benchmark_min_time); - -DECLARE_int32(benchmark_repetitions); - -DECLARE_bool(benchmark_report_aggregates_only); - -DECLARE_bool(benchmark_display_aggregates_only); - -namespace benchmark { - -namespace internal { - -extern MemoryManager* memory_manager; - -struct RunResults { - std::vector non_aggregates; - std::vector aggregates_only; - - bool display_report_aggregates_only = false; - bool file_report_aggregates_only = false; -}; - -RunResults RunBenchmark( - const benchmark::internal::BenchmarkInstance& b, - std::vector* complexity_reports); - -} // namespace internal - -} // end namespace benchmark - -#endif // BENCHMARK_RUNNER_H_ diff --git a/benchmarks/thirdparty/benchmark/src/check.h b/benchmarks/thirdparty/benchmark/src/check.h deleted file mode 100755 index f5f8253f80..0000000000 --- a/benchmarks/thirdparty/benchmark/src/check.h +++ /dev/null @@ -1,82 +0,0 @@ -#ifndef CHECK_H_ -#define CHECK_H_ - -#include -#include -#include - -#include "internal_macros.h" -#include "log.h" - -namespace benchmark { -namespace internal { - -typedef void(AbortHandlerT)(); - -inline AbortHandlerT*& GetAbortHandler() { - static AbortHandlerT* handler = &std::abort; - return handler; -} - -BENCHMARK_NORETURN inline void CallAbortHandler() { - GetAbortHandler()(); - std::abort(); // fallback to enforce noreturn -} - -// CheckHandler is the class constructed by failing CHECK macros. CheckHandler -// will log information about the failures and abort when it is destructed. -class CheckHandler { - public: - CheckHandler(const char* check, const char* file, const char* func, int line) - : log_(GetErrorLogInstance()) { - log_ << file << ":" << line << ": " << func << ": Check `" << check - << "' failed. "; - } - - LogType& GetLog() { return log_; } - - BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) { - log_ << std::endl; - CallAbortHandler(); - } - - CheckHandler& operator=(const CheckHandler&) = delete; - CheckHandler(const CheckHandler&) = delete; - CheckHandler() = delete; - - private: - LogType& log_; -}; - -} // end namespace internal -} // end namespace benchmark - -// The CHECK macro returns a std::ostream object that can have extra information -// written to it. -#ifndef NDEBUG -#define CHECK(b) \ - (b ? 
::benchmark::internal::GetNullLogInstance() \ - : ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \ - .GetLog()) -#else -#define CHECK(b) ::benchmark::internal::GetNullLogInstance() -#endif - -// clang-format off -// preserve whitespacing between operators for alignment -#define CHECK_EQ(a, b) CHECK((a) == (b)) -#define CHECK_NE(a, b) CHECK((a) != (b)) -#define CHECK_GE(a, b) CHECK((a) >= (b)) -#define CHECK_LE(a, b) CHECK((a) <= (b)) -#define CHECK_GT(a, b) CHECK((a) > (b)) -#define CHECK_LT(a, b) CHECK((a) < (b)) - -#define CHECK_FLOAT_EQ(a, b, eps) CHECK(std::fabs((a) - (b)) < (eps)) -#define CHECK_FLOAT_NE(a, b, eps) CHECK(std::fabs((a) - (b)) >= (eps)) -#define CHECK_FLOAT_GE(a, b, eps) CHECK((a) - (b) > -(eps)) -#define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps)) -#define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) > (eps)) -#define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) > (eps)) -//clang-format on - -#endif // CHECK_H_ diff --git a/benchmarks/thirdparty/benchmark/src/colorprint.cc b/benchmarks/thirdparty/benchmark/src/colorprint.cc deleted file mode 100755 index fff6a98818..0000000000 --- a/benchmarks/thirdparty/benchmark/src/colorprint.cc +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "colorprint.h" - -#include -#include -#include -#include -#include -#include - -#include "check.h" -#include "internal_macros.h" - -#ifdef BENCHMARK_OS_WINDOWS -#include -#include -#else -#include -#endif // BENCHMARK_OS_WINDOWS - -namespace benchmark { -namespace { -#ifdef BENCHMARK_OS_WINDOWS -typedef WORD PlatformColorCode; -#else -typedef const char* PlatformColorCode; -#endif - -PlatformColorCode GetPlatformColorCode(LogColor color) { -#ifdef BENCHMARK_OS_WINDOWS - switch (color) { - case COLOR_RED: - return FOREGROUND_RED; - case COLOR_GREEN: - return FOREGROUND_GREEN; - case COLOR_YELLOW: - return FOREGROUND_RED | FOREGROUND_GREEN; - case COLOR_BLUE: - return FOREGROUND_BLUE; - case COLOR_MAGENTA: - return FOREGROUND_BLUE | FOREGROUND_RED; - case COLOR_CYAN: - return FOREGROUND_BLUE | FOREGROUND_GREEN; - case COLOR_WHITE: // fall through to default - default: - return 0; - } -#else - switch (color) { - case COLOR_RED: - return "1"; - case COLOR_GREEN: - return "2"; - case COLOR_YELLOW: - return "3"; - case COLOR_BLUE: - return "4"; - case COLOR_MAGENTA: - return "5"; - case COLOR_CYAN: - return "6"; - case COLOR_WHITE: - return "7"; - default: - return nullptr; - }; -#endif -} - -} // end namespace - -std::string FormatString(const char* msg, va_list args) { - // we might need a second shot at this, so pre-emptivly make a copy - va_list args_cp; - va_copy(args_cp, args); - - std::size_t size = 256; - char local_buff[256]; - auto ret = vsnprintf(local_buff, size, msg, args_cp); - - va_end(args_cp); - - // currently there is no error handling for failure, so this is hack. 
- CHECK(ret >= 0); - - if (ret == 0) // handle empty expansion - return {}; - else if (static_cast(ret) < size) - return local_buff; - else { - // we did not provide a long enough buffer on our first attempt. - size = (size_t)ret + 1; // + 1 for the null byte - std::unique_ptr buff(new char[size]); - ret = vsnprintf(buff.get(), size, msg, args); - CHECK(ret > 0 && ((size_t)ret) < size); - return buff.get(); - } -} - -std::string FormatString(const char* msg, ...) { - va_list args; - va_start(args, msg); - auto tmp = FormatString(msg, args); - va_end(args); - return tmp; -} - -void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...) { - va_list args; - va_start(args, fmt); - ColorPrintf(out, color, fmt, args); - va_end(args); -} - -void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, - va_list args) { -#ifdef BENCHMARK_OS_WINDOWS - ((void)out); // suppress unused warning - - const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE); - - // Gets the current text color. - CONSOLE_SCREEN_BUFFER_INFO buffer_info; - GetConsoleScreenBufferInfo(stdout_handle, &buffer_info); - const WORD old_color_attrs = buffer_info.wAttributes; - - // We need to flush the stream buffers into the console before each - // SetConsoleTextAttribute call lest it affect the text that is already - // printed but has not yet reached the console. - fflush(stdout); - SetConsoleTextAttribute(stdout_handle, - GetPlatformColorCode(color) | FOREGROUND_INTENSITY); - vprintf(fmt, args); - - fflush(stdout); - // Restores the text color. - SetConsoleTextAttribute(stdout_handle, old_color_attrs); -#else - const char* color_code = GetPlatformColorCode(color); - if (color_code) out << FormatString("\033[0;3%sm", color_code); - out << FormatString(fmt, args) << "\033[m"; -#endif -} - -bool IsColorTerminal() { -#if BENCHMARK_OS_WINDOWS - // On Windows the TERM variable is usually not set, but the - // console there does support colors. - return 0 != _isatty(_fileno(stdout)); -#else - // On non-Windows platforms, we rely on the TERM variable. This list of - // supported TERM values is copied from Google Test: - // . 
- const char* const SUPPORTED_TERM_VALUES[] = { - "xterm", "xterm-color", "xterm-256color", - "screen", "screen-256color", "tmux", - "tmux-256color", "rxvt-unicode", "rxvt-unicode-256color", - "linux", "cygwin", - }; - - const char* const term = getenv("TERM"); - - bool term_supports_color = false; - for (const char* candidate : SUPPORTED_TERM_VALUES) { - if (term && 0 == strcmp(term, candidate)) { - term_supports_color = true; - break; - } - } - - return 0 != isatty(fileno(stdout)) && term_supports_color; -#endif // BENCHMARK_OS_WINDOWS -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/colorprint.h b/benchmarks/thirdparty/benchmark/src/colorprint.h deleted file mode 100755 index 9f6fab9b34..0000000000 --- a/benchmarks/thirdparty/benchmark/src/colorprint.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef BENCHMARK_COLORPRINT_H_ -#define BENCHMARK_COLORPRINT_H_ - -#include -#include -#include - -namespace benchmark { -enum LogColor { - COLOR_DEFAULT, - COLOR_RED, - COLOR_GREEN, - COLOR_YELLOW, - COLOR_BLUE, - COLOR_MAGENTA, - COLOR_CYAN, - COLOR_WHITE -}; - -std::string FormatString(const char* msg, va_list args); -std::string FormatString(const char* msg, ...); - -void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, - va_list args); -void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...); - -// Returns true if stdout appears to be a terminal that supports colored -// output, false otherwise. -bool IsColorTerminal(); - -} // end namespace benchmark - -#endif // BENCHMARK_COLORPRINT_H_ diff --git a/benchmarks/thirdparty/benchmark/src/commandlineflags.cc b/benchmarks/thirdparty/benchmark/src/commandlineflags.cc deleted file mode 100755 index 0648fe3a06..0000000000 --- a/benchmarks/thirdparty/benchmark/src/commandlineflags.cc +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "commandlineflags.h" - -#include -#include -#include -#include -#include -#include - -namespace benchmark { -namespace { - -// Parses 'str' for a 32-bit signed integer. If successful, writes -// the result to *value and returns true; otherwise leaves *value -// unchanged and returns false. -bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) { - // Parses the environment variable as a decimal integer. - char* end = nullptr; - const long long_value = strtol(str, &end, 10); // NOLINT - - // Has strtol() consumed all characters in the string? - if (*end != '\0') { - // No - an invalid character was encountered. - std::cerr << src_text << " is expected to be a 32-bit integer, " - << "but actually has value \"" << str << "\".\n"; - return false; - } - - // Is the parsed value in the range of an Int32? - const int32_t result = static_cast(long_value); - if (long_value == std::numeric_limits::max() || - long_value == std::numeric_limits::min() || - // The parsed value overflows as a long. 
(strtol() returns
-      // LONG_MAX or LONG_MIN when the input overflows.)
-      result != long_value
-      // The parsed value overflows as an Int32.
-      ) {
-    std::cerr << src_text << " is expected to be a 32-bit integer, "
-              << "but actually has value \"" << str << "\", "
-              << "which overflows.\n";
-    return false;
-  }
-
-  *value = result;
-  return true;
-}
-
-// Parses 'str' for a double. If successful, writes the result to *value and
-// returns true; otherwise leaves *value unchanged and returns false.
-bool ParseDouble(const std::string& src_text, const char* str, double* value) {
-  // Parses the environment variable as a double.
-  char* end = nullptr;
-  const double double_value = strtod(str, &end);  // NOLINT
-
-  // Has strtod() consumed all characters in the string?
-  if (*end != '\0') {
-    // No - an invalid character was encountered.
-    std::cerr << src_text << " is expected to be a double, "
-              << "but actually has value \"" << str << "\".\n";
-    return false;
-  }
-
-  *value = double_value;
-  return true;
-}
-
-// Returns the name of the environment variable corresponding to the
-// given flag. For example, FlagToEnvVar("foo") will return
-// "BENCHMARK_FOO" in the open-source version.
-static std::string FlagToEnvVar(const char* flag) {
-  const std::string flag_str(flag);
-
-  std::string env_var;
-  for (size_t i = 0; i != flag_str.length(); ++i)
-    env_var += static_cast<char>(::toupper(flag_str.c_str()[i]));
-
-  return env_var;
-}
-
-}  // namespace
-
-bool BoolFromEnv(const char* flag, bool default_val) {
-  const std::string env_var = FlagToEnvVar(flag);
-  const char* const value_str = getenv(env_var.c_str());
-  return value_str == nullptr ? default_val : IsTruthyFlagValue(value_str);
-}
-
-int32_t Int32FromEnv(const char* flag, int32_t default_val) {
-  const std::string env_var = FlagToEnvVar(flag);
-  const char* const value_str = getenv(env_var.c_str());
-  int32_t value = default_val;
-  if (value_str == nullptr ||
-      !ParseInt32(std::string("Environment variable ") + env_var, value_str,
-                  &value)) {
-    return default_val;
-  }
-  return value;
-}
-
-double DoubleFromEnv(const char* flag, double default_val) {
-  const std::string env_var = FlagToEnvVar(flag);
-  const char* const value_str = getenv(env_var.c_str());
-  double value = default_val;
-  if (value_str == nullptr ||
-      !ParseDouble(std::string("Environment variable ") + env_var, value_str,
-                   &value)) {
-    return default_val;
-  }
-  return value;
-}
-
-const char* StringFromEnv(const char* flag, const char* default_val) {
-  const std::string env_var = FlagToEnvVar(flag);
-  const char* const value = getenv(env_var.c_str());
-  return value == nullptr ? default_val : value;
-}
-
-// Parses a string as a command line flag. The string should have
-// the format "--flag=value". When def_optional is true, the "=value"
-// part can be omitted.
-//
-// Returns the value of the flag, or nullptr if the parsing failed.
-const char* ParseFlagValue(const char* str, const char* flag,
-                           bool def_optional) {
-  // str and flag must not be nullptr.
-  if (str == nullptr || flag == nullptr) return nullptr;
-
-  // The flag must start with "--".
-  const std::string flag_str = std::string("--") + std::string(flag);
-  const size_t flag_len = flag_str.length();
-  if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr;
-
-  // Skips the flag name.
-  const char* flag_end = str + flag_len;
-
-  // When def_optional is true, it's OK to not have a "=value" part.
- if (def_optional && (flag_end[0] == '\0')) return flag_end; - - // If def_optional is true and there are more characters after the - // flag name, or if def_optional is false, there must be a '=' after - // the flag name. - if (flag_end[0] != '=') return nullptr; - - // Returns the string after "=". - return flag_end + 1; -} - -bool ParseBoolFlag(const char* str, const char* flag, bool* value) { - // Gets the value of the flag as a string. - const char* const value_str = ParseFlagValue(str, flag, true); - - // Aborts if the parsing failed. - if (value_str == nullptr) return false; - - // Converts the string value to a bool. - *value = IsTruthyFlagValue(value_str); - return true; -} - -bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) { - // Gets the value of the flag as a string. - const char* const value_str = ParseFlagValue(str, flag, false); - - // Aborts if the parsing failed. - if (value_str == nullptr) return false; - - // Sets *value to the value of the flag. - return ParseInt32(std::string("The value of flag --") + flag, value_str, - value); -} - -bool ParseDoubleFlag(const char* str, const char* flag, double* value) { - // Gets the value of the flag as a string. - const char* const value_str = ParseFlagValue(str, flag, false); - - // Aborts if the parsing failed. - if (value_str == nullptr) return false; - - // Sets *value to the value of the flag. - return ParseDouble(std::string("The value of flag --") + flag, value_str, - value); -} - -bool ParseStringFlag(const char* str, const char* flag, std::string* value) { - // Gets the value of the flag as a string. - const char* const value_str = ParseFlagValue(str, flag, false); - - // Aborts if the parsing failed. - if (value_str == nullptr) return false; - - *value = value_str; - return true; -} - -bool IsFlag(const char* str, const char* flag) { - return (ParseFlagValue(str, flag, true) != nullptr); -} - -bool IsTruthyFlagValue(const std::string& value) { - if (value.size() == 1) { - char v = value[0]; - return isalnum(v) && - !(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N'); - } else if (!value.empty()) { - std::string value_lower(value); - std::transform(value_lower.begin(), value_lower.end(), value_lower.begin(), - [](char c) { return static_cast(::tolower(c)); }); - return !(value_lower == "false" || value_lower == "no" || - value_lower == "off"); - } else - return true; -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/commandlineflags.h b/benchmarks/thirdparty/benchmark/src/commandlineflags.h deleted file mode 100755 index 3a1f6a8dbc..0000000000 --- a/benchmarks/thirdparty/benchmark/src/commandlineflags.h +++ /dev/null @@ -1,103 +0,0 @@ -#ifndef BENCHMARK_COMMANDLINEFLAGS_H_ -#define BENCHMARK_COMMANDLINEFLAGS_H_ - -#include -#include - -// Macro for referencing flags. -#define FLAG(name) FLAGS_##name - -// Macros for declaring flags. -#define DECLARE_bool(name) extern bool FLAG(name) -#define DECLARE_int32(name) extern int32_t FLAG(name) -#define DECLARE_double(name) extern double FLAG(name) -#define DECLARE_string(name) extern std::string FLAG(name) - -// Macros for defining flags. 
-#define DEFINE_bool(name, default_val)   \
-  bool FLAG(name) =                      \
-      benchmark::BoolFromEnv(#name, default_val)
-#define DEFINE_int32(name, default_val)  \
-  int32_t FLAG(name) =                   \
-      benchmark::Int32FromEnv(#name, default_val)
-#define DEFINE_double(name, default_val) \
-  double FLAG(name) =                    \
-      benchmark::DoubleFromEnv(#name, default_val)
-#define DEFINE_string(name, default_val) \
-  std::string FLAG(name) =               \
-      benchmark::StringFromEnv(#name, default_val)
-
-namespace benchmark {
-
-// Parses a bool from the environment variable
-// corresponding to the given flag.
-//
-// If the variable exists, returns the IsTruthyFlagValue() value; if not,
-// returns the given default value.
-bool BoolFromEnv(const char* flag, bool default_val);
-
-// Parses an Int32 from the environment variable
-// corresponding to the given flag.
-//
-// If the variable exists, returns the ParseInt32() value; if not, returns
-// the given default value.
-int32_t Int32FromEnv(const char* flag, int32_t default_val);
-
-// Parses a double from the environment variable
-// corresponding to the given flag.
-//
-// If the variable exists, returns the ParseDouble() value; if not, returns
-// the given default value.
-double DoubleFromEnv(const char* flag, double default_val);
-
-// Parses a string from the environment variable
-// corresponding to the given flag.
-//
-// If the variable exists, returns its value; if not, returns
-// the given default value.
-const char* StringFromEnv(const char* flag, const char* default_val);
-
-// Parses a string for a bool flag, in the form of either
-// "--flag=value" or "--flag".
-//
-// In the former case, the value is taken as true if it passes IsTruthyValue().
-//
-// In the latter case, the value is taken as true.
-//
-// On success, stores the value of the flag in *value, and returns
-// true. On failure, returns false without changing *value.
-bool ParseBoolFlag(const char* str, const char* flag, bool* value);
-
-// Parses a string for an Int32 flag, in the form of
-// "--flag=value".
-//
-// On success, stores the value of the flag in *value, and returns
-// true. On failure, returns false without changing *value.
-bool ParseInt32Flag(const char* str, const char* flag, int32_t* value);
-
-// Parses a string for a double flag, in the form of
-// "--flag=value".
-//
-// On success, stores the value of the flag in *value, and returns
-// true. On failure, returns false without changing *value.
-bool ParseDoubleFlag(const char* str, const char* flag, double* value);
-
-// Parses a string for a string flag, in the form of
-// "--flag=value".
-//
-// On success, stores the value of the flag in *value, and returns
-// true. On failure, returns false without changing *value.
-bool ParseStringFlag(const char* str, const char* flag, std::string* value);
-
-// Returns true if the string matches the flag.
-bool IsFlag(const char* str, const char* flag);
-
-// Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or
-// some non-alphanumeric character. Also returns false if the value matches
-// one of 'no', 'false', 'off' (case-insensitive). As a special case, also
-// returns true if value is the empty string.
-bool IsTruthyFlagValue(const std::string& value); - -} // end namespace benchmark - -#endif // BENCHMARK_COMMANDLINEFLAGS_H_ diff --git a/benchmarks/thirdparty/benchmark/src/complexity.cc b/benchmarks/thirdparty/benchmark/src/complexity.cc deleted file mode 100755 index aeed67f0c7..0000000000 --- a/benchmarks/thirdparty/benchmark/src/complexity.cc +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2016 Ismael Jimenez Martinez. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Source project : https://github.com/ismaelJimenez/cpp.leastsq -// Adapted to be used with google benchmark - -#include "benchmark/benchmark.h" - -#include -#include -#include "check.h" -#include "complexity.h" - -namespace benchmark { - -// Internal function to calculate the different scalability forms -BigOFunc* FittingCurve(BigO complexity) { - static const double kLog2E = 1.44269504088896340736; - switch (complexity) { - case oN: - return [](IterationCount n) -> double { return static_cast(n); }; - case oNSquared: - return [](IterationCount n) -> double { return std::pow(n, 2); }; - case oNCubed: - return [](IterationCount n) -> double { return std::pow(n, 3); }; - case oLogN: - /* Note: can't use log2 because Android's GNU STL lacks it */ - return - [](IterationCount n) { return kLog2E * log(static_cast(n)); }; - case oNLogN: - /* Note: can't use log2 because Android's GNU STL lacks it */ - return [](IterationCount n) { - return kLog2E * n * log(static_cast(n)); - }; - case o1: - default: - return [](IterationCount) { return 1.0; }; - } -} - -// Function to return an string for the calculated complexity -std::string GetBigOString(BigO complexity) { - switch (complexity) { - case oN: - return "N"; - case oNSquared: - return "N^2"; - case oNCubed: - return "N^3"; - case oLogN: - return "lgN"; - case oNLogN: - return "NlgN"; - case o1: - return "(1)"; - default: - return "f(N)"; - } -} - -// Find the coefficient for the high-order term in the running time, by -// minimizing the sum of squares of relative error, for the fitting curve -// given by the lambda expression. -// - n : Vector containing the size of the benchmark tests. -// - time : Vector containing the times for the benchmark tests. -// - fitting_curve : lambda expression (e.g. [](int64_t n) {return n; };). - -// For a deeper explanation on the algorithm logic, please refer to -// https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics - -LeastSq MinimalLeastSq(const std::vector& n, - const std::vector& time, - BigOFunc* fitting_curve) { - double sigma_gn = 0.0; - double sigma_gn_squared = 0.0; - double sigma_time = 0.0; - double sigma_time_gn = 0.0; - - // Calculate least square fitting parameter - for (size_t i = 0; i < n.size(); ++i) { - double gn_i = fitting_curve(n[i]); - sigma_gn += gn_i; - sigma_gn_squared += gn_i * gn_i; - sigma_time += time[i]; - sigma_time_gn += time[i] * gn_i; - } - - LeastSq result; - result.complexity = oLambda; - - // Calculate complexity. 
-  result.coef = sigma_time_gn / sigma_gn_squared;
-
-  // Calculate RMS
-  double rms = 0.0;
-  for (size_t i = 0; i < n.size(); ++i) {
-    double fit = result.coef * fitting_curve(n[i]);
-    rms += pow((time[i] - fit), 2);
-  }
-
-  // Normalized RMS by the mean of the observed values
-  double mean = sigma_time / n.size();
-  result.rms = sqrt(rms / n.size()) / mean;
-
-  return result;
-}
-
-// Find the coefficient for the high-order term in the running time, by
-// minimizing the sum of squares of relative error.
-// - n          : Vector containing the size of the benchmark tests.
-// - time       : Vector containing the times for the benchmark tests.
-// - complexity : If different than oAuto, the fitting curve will stick to
-//                this one. If it is oAuto, the best-fitting curve will be
-//                calculated.
-LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
                       const std::vector<double>& time, const BigO complexity) {
-  CHECK_EQ(n.size(), time.size());
-  CHECK_GE(n.size(), 2);  // Do not compute a fitting curve if fewer than two
-                          // benchmark runs are given.
-  CHECK_NE(complexity, oNone);
-
-  LeastSq best_fit;
-
-  if (complexity == oAuto) {
-    std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed};
-
-    // Take o1 as default best fitting curve
-    best_fit = MinimalLeastSq(n, time, FittingCurve(o1));
-    best_fit.complexity = o1;
-
-    // Compute all possible fitting curves and stick to the best one
-    for (const auto& fit : fit_curves) {
-      LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit));
-      if (current_fit.rms < best_fit.rms) {
-        best_fit = current_fit;
-        best_fit.complexity = fit;
-      }
-    }
-  } else {
-    best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
-    best_fit.complexity = complexity;
-  }
-
-  return best_fit;
-}
-
-std::vector<BenchmarkReporter::Run> ComputeBigO(
-    const std::vector<BenchmarkReporter::Run>& reports) {
-  typedef BenchmarkReporter::Run Run;
-  std::vector<Run> results;
-
-  if (reports.size() < 2) return results;
-
-  // Accumulators.
-  std::vector<int64_t> n;
-  std::vector<double> real_time;
-  std::vector<double> cpu_time;
-
-  // Populate the accumulators.
-  for (const Run& run : reports) {
-    CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?";
-    n.push_back(run.complexity_n);
-    real_time.push_back(run.real_accumulated_time / run.iterations);
-    cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
-  }
-
-  LeastSq result_cpu;
-  LeastSq result_real;
-
-  if (reports[0].complexity == oLambda) {
-    result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
-    result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
-  } else {
-    result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
-    result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
-  }
-
-  // Drop the 'args' when reporting complexity.
-  auto run_name = reports[0].run_name;
-  run_name.args.clear();
-
-  // Get the data from the accumulator to BenchmarkReporter::Run's.
-  Run big_o;
-  big_o.run_name = run_name;
-  big_o.run_type = BenchmarkReporter::Run::RT_Aggregate;
-  big_o.repetitions = reports[0].repetitions;
-  big_o.repetition_index = Run::no_repetition_index;
-  big_o.threads = reports[0].threads;
-  big_o.aggregate_name = "BigO";
-  big_o.report_label = reports[0].report_label;
-  big_o.iterations = 0;
-  big_o.real_accumulated_time = result_real.coef;
-  big_o.cpu_accumulated_time = result_cpu.coef;
-  big_o.report_big_o = true;
-  big_o.complexity = result_cpu.complexity;
-
-  // All the time results are reported after being multiplied by the
-  // time unit multiplier.
But since RMS is a relative quantity it - // should not be multiplied at all. So, here, we _divide_ it by the - // multiplier so that when it is multiplied later the result is the - // correct one. - double multiplier = GetTimeUnitMultiplier(reports[0].time_unit); - - // Only add label to mean/stddev if it is same for all runs - Run rms; - rms.run_name = run_name; - rms.run_type = BenchmarkReporter::Run::RT_Aggregate; - rms.aggregate_name = "RMS"; - rms.report_label = big_o.report_label; - rms.iterations = 0; - rms.repetition_index = Run::no_repetition_index; - rms.repetitions = reports[0].repetitions; - rms.threads = reports[0].threads; - rms.real_accumulated_time = result_real.rms / multiplier; - rms.cpu_accumulated_time = result_cpu.rms / multiplier; - rms.report_rms = true; - rms.complexity = result_cpu.complexity; - // don't forget to keep the time unit, or we won't be able to - // recover the correct value. - rms.time_unit = reports[0].time_unit; - - results.push_back(big_o); - results.push_back(rms); - return results; -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/complexity.h b/benchmarks/thirdparty/benchmark/src/complexity.h deleted file mode 100755 index df29b48d29..0000000000 --- a/benchmarks/thirdparty/benchmark/src/complexity.h +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2016 Ismael Jimenez Martinez. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Source project : https://github.com/ismaelJimenez/cpp.leastsq -// Adapted to be used with google benchmark - -#ifndef COMPLEXITY_H_ -#define COMPLEXITY_H_ - -#include -#include - -#include "benchmark/benchmark.h" - -namespace benchmark { - -// Return a vector containing the bigO and RMS information for the specified -// list of reports. If 'reports.size() < 2' an empty vector is returned. -std::vector ComputeBigO( - const std::vector& reports); - -// This data structure will contain the result returned by MinimalLeastSq -// - coef : Estimated coeficient for the high-order term as -// interpolated from data. -// - rms : Normalized Root Mean Squared Error. -// - complexity : Scalability form (e.g. oN, oNLogN). In case a scalability -// form has been provided to MinimalLeastSq this will return -// the same value. In case BigO::oAuto has been selected, this -// parameter will return the best fitting curve detected. - -struct LeastSq { - LeastSq() : coef(0.0), rms(0.0), complexity(oNone) {} - - double coef; - double rms; - BigO complexity; -}; - -// Function to return an string for the calculated complexity -std::string GetBigOString(BigO complexity); - -} // end namespace benchmark - -#endif // COMPLEXITY_H_ diff --git a/benchmarks/thirdparty/benchmark/src/console_reporter.cc b/benchmarks/thirdparty/benchmark/src/console_reporter.cc deleted file mode 100755 index 6fd764525e..0000000000 --- a/benchmarks/thirdparty/benchmark/src/console_reporter.cc +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "benchmark/benchmark.h" -#include "check.h" -#include "colorprint.h" -#include "commandlineflags.h" -#include "complexity.h" -#include "counter.h" -#include "internal_macros.h" -#include "string_util.h" -#include "timers.h" - -namespace benchmark { - -bool ConsoleReporter::ReportContext(const Context& context) { - name_field_width_ = context.name_field_width; - printed_header_ = false; - prev_counters_.clear(); - - PrintBasicContext(&GetErrorStream(), context); - -#ifdef BENCHMARK_OS_WINDOWS - if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) { - GetErrorStream() - << "Color printing is only supported for stdout on windows." - " Disabling color printing\n"; - output_options_ = static_cast< OutputOptions >(output_options_ & ~OO_Color); - } -#endif - - return true; -} - -void ConsoleReporter::PrintHeader(const Run& run) { - std::string str = FormatString("%-*s %13s %15s %12s", static_cast(name_field_width_), - "Benchmark", "Time", "CPU", "Iterations"); - if(!run.counters.empty()) { - if(output_options_ & OO_Tabular) { - for(auto const& c : run.counters) { - str += FormatString(" %10s", c.first.c_str()); - } - } else { - str += " UserCounters..."; - } - } - std::string line = std::string(str.length(), '-'); - GetOutputStream() << line << "\n" << str << "\n" << line << "\n"; -} - -void ConsoleReporter::ReportRuns(const std::vector& reports) { - for (const auto& run : reports) { - // print the header: - // --- if none was printed yet - bool print_header = !printed_header_; - // --- or if the format is tabular and this run - // has different fields from the prev header - print_header |= (output_options_ & OO_Tabular) && - (!internal::SameNames(run.counters, prev_counters_)); - if (print_header) { - printed_header_ = true; - prev_counters_ = run.counters; - PrintHeader(run); - } - // As an alternative to printing the headers like this, we could sort - // the benchmarks by header and then print. But this would require - // waiting for the full results before printing, or printing twice. - PrintRunData(run); - } -} - -static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt, - ...) { - va_list args; - va_start(args, fmt); - out << FormatString(fmt, args); - va_end(args); -} - - -static std::string FormatTime(double time) { - // Align decimal places... - if (time < 1.0) { - return FormatString("%10.3f", time); - } - if (time < 10.0) { - return FormatString("%10.2f", time); - } - if (time < 100.0) { - return FormatString("%10.1f", time); - } - return FormatString("%10.0f", time); -} - -void ConsoleReporter::PrintRunData(const Run& result) { - typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...); - auto& Out = GetOutputStream(); - PrinterFn* printer = (output_options_ & OO_Color) ? - (PrinterFn*)ColorPrintf : IgnoreColorPrint; - auto name_color = - (result.report_big_o || result.report_rms) ? 
COLOR_BLUE : COLOR_GREEN; - printer(Out, name_color, "%-*s ", name_field_width_, - result.benchmark_name().c_str()); - - if (result.error_occurred) { - printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'", - result.error_message.c_str()); - printer(Out, COLOR_DEFAULT, "\n"); - return; - } - - const double real_time = result.GetAdjustedRealTime(); - const double cpu_time = result.GetAdjustedCPUTime(); - const std::string real_time_str = FormatTime(real_time); - const std::string cpu_time_str = FormatTime(cpu_time); - - - if (result.report_big_o) { - std::string big_o = GetBigOString(result.complexity); - printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time, big_o.c_str(), - cpu_time, big_o.c_str()); - } else if (result.report_rms) { - printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%", - cpu_time * 100, "%"); - } else { - const char* timeLabel = GetTimeUnitString(result.time_unit); - printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(), timeLabel, - cpu_time_str.c_str(), timeLabel); - } - - if (!result.report_big_o && !result.report_rms) { - printer(Out, COLOR_CYAN, "%10lld", result.iterations); - } - - for (auto& c : result.counters) { - const std::size_t cNameLen = std::max(std::string::size_type(10), - c.first.length()); - auto const& s = HumanReadableNumber(c.second.value, c.second.oneK); - const char* unit = ""; - if (c.second.flags & Counter::kIsRate) - unit = (c.second.flags & Counter::kInvert) ? "s" : "/s"; - if (output_options_ & OO_Tabular) { - printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(), - unit); - } else { - printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), unit); - } - } - - if (!result.report_label.empty()) { - printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str()); - } - - printer(Out, COLOR_DEFAULT, "\n"); -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/counter.cc b/benchmarks/thirdparty/benchmark/src/counter.cc deleted file mode 100755 index cf5b78ee3a..0000000000 --- a/benchmarks/thirdparty/benchmark/src/counter.cc +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "counter.h" - -namespace benchmark { -namespace internal { - -double Finish(Counter const& c, IterationCount iterations, double cpu_time, - double num_threads) { - double v = c.value; - if (c.flags & Counter::kIsRate) { - v /= cpu_time; - } - if (c.flags & Counter::kAvgThreads) { - v /= num_threads; - } - if (c.flags & Counter::kIsIterationInvariant) { - v *= iterations; - } - if (c.flags & Counter::kAvgIterations) { - v /= iterations; - } - - if (c.flags & Counter::kInvert) { // Invert is *always* last. 
- v = 1.0 / v; - } - return v; -} - -void Finish(UserCounters* l, IterationCount iterations, double cpu_time, - double num_threads) { - for (auto& c : *l) { - c.second.value = Finish(c.second, iterations, cpu_time, num_threads); - } -} - -void Increment(UserCounters* l, UserCounters const& r) { - // add counters present in both or just in *l - for (auto& c : *l) { - auto it = r.find(c.first); - if (it != r.end()) { - c.second.value = c.second + it->second; - } - } - // add counters present in r, but not in *l - for (auto const& tc : r) { - auto it = l->find(tc.first); - if (it == l->end()) { - (*l)[tc.first] = tc.second; - } - } -} - -bool SameNames(UserCounters const& l, UserCounters const& r) { - if (&l == &r) return true; - if (l.size() != r.size()) { - return false; - } - for (auto const& c : l) { - if (r.find(c.first) == r.end()) { - return false; - } - } - return true; -} - -} // end namespace internal -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/counter.h b/benchmarks/thirdparty/benchmark/src/counter.h deleted file mode 100755 index 1f5a58e31f..0000000000 --- a/benchmarks/thirdparty/benchmark/src/counter.h +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef BENCHMARK_COUNTER_H_ -#define BENCHMARK_COUNTER_H_ - -#include "benchmark/benchmark.h" - -namespace benchmark { - -// these counter-related functions are hidden to reduce API surface. -namespace internal { -void Finish(UserCounters* l, IterationCount iterations, double time, - double num_threads); -void Increment(UserCounters* l, UserCounters const& r); -bool SameNames(UserCounters const& l, UserCounters const& r); -} // end namespace internal - -} // end namespace benchmark - -#endif // BENCHMARK_COUNTER_H_ diff --git a/benchmarks/thirdparty/benchmark/src/csv_reporter.cc b/benchmarks/thirdparty/benchmark/src/csv_reporter.cc deleted file mode 100755 index af2c18fc8a..0000000000 --- a/benchmarks/thirdparty/benchmark/src/csv_reporter.cc +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "benchmark/benchmark.h" -#include "complexity.h" - -#include -#include -#include -#include -#include -#include - -#include "check.h" -#include "string_util.h" -#include "timers.h" - -// File format reference: http://edoceo.com/utilitas/csv-file-format. 
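
For orientation, the flag handling in counter.cc above is a chain of independent rescalings; only kInvert is order-sensitive, since it inverts the fully scaled value. A toy sketch under assumed inputs (simplified flags, illustrative numbers):

    // Sketch of counter.cc's Finish() flag chain.
    double FinishSketch(double v, bool is_rate, bool avg_threads, bool invert,
                        double cpu_time, double num_threads) {
      if (is_rate)     v /= cpu_time;     // kIsRate: value per second of CPU time
      if (avg_threads) v /= num_threads;  // kAvgThreads: mean over worker threads
      if (invert)      v = 1.0 / v;       // kInvert is *always* last
      return v;
    }
    // e.g. 1000 items, 2.0 s CPU, 4 threads -> 1000/2/4 = 125 items/s per thread;
    // with invert also set, 1/125 = 0.008 s per item.
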
- -namespace benchmark { - -namespace { -std::vector elements = { - "name", "iterations", "real_time", "cpu_time", - "time_unit", "bytes_per_second", "items_per_second", "label", - "error_occurred", "error_message"}; -} // namespace - -std::string CsvEscape(const std::string & s) { - std::string tmp; - tmp.reserve(s.size() + 2); - for (char c : s) { - switch (c) { - case '"' : tmp += "\"\""; break; - default : tmp += c; break; - } - } - return '"' + tmp + '"'; -} - -bool CSVReporter::ReportContext(const Context& context) { - PrintBasicContext(&GetErrorStream(), context); - return true; -} - -void CSVReporter::ReportRuns(const std::vector& reports) { - std::ostream& Out = GetOutputStream(); - - if (!printed_header_) { - // save the names of all the user counters - for (const auto& run : reports) { - for (const auto& cnt : run.counters) { - if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second") - continue; - user_counter_names_.insert(cnt.first); - } - } - - // print the header - for (auto B = elements.begin(); B != elements.end();) { - Out << *B++; - if (B != elements.end()) Out << ","; - } - for (auto B = user_counter_names_.begin(); - B != user_counter_names_.end();) { - Out << ",\"" << *B++ << "\""; - } - Out << "\n"; - - printed_header_ = true; - } else { - // check that all the current counters are saved in the name set - for (const auto& run : reports) { - for (const auto& cnt : run.counters) { - if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second") - continue; - CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end()) - << "All counters must be present in each run. " - << "Counter named \"" << cnt.first - << "\" was not in a run after being added to the header"; - } - } - } - - // print results for each run - for (const auto& run : reports) { - PrintRunData(run); - } -} - -void CSVReporter::PrintRunData(const Run& run) { - std::ostream& Out = GetOutputStream(); - Out << CsvEscape(run.benchmark_name()) << ","; - if (run.error_occurred) { - Out << std::string(elements.size() - 3, ','); - Out << "true,"; - Out << CsvEscape(run.error_message) << "\n"; - return; - } - - // Do not print iteration on bigO and RMS report - if (!run.report_big_o && !run.report_rms) { - Out << run.iterations; - } - Out << ","; - - Out << run.GetAdjustedRealTime() << ","; - Out << run.GetAdjustedCPUTime() << ","; - - // Do not print timeLabel on bigO and RMS report - if (run.report_big_o) { - Out << GetBigOString(run.complexity); - } else if (!run.report_rms) { - Out << GetTimeUnitString(run.time_unit); - } - Out << ","; - - if (run.counters.find("bytes_per_second") != run.counters.end()) { - Out << run.counters.at("bytes_per_second"); - } - Out << ","; - if (run.counters.find("items_per_second") != run.counters.end()) { - Out << run.counters.at("items_per_second"); - } - Out << ","; - if (!run.report_label.empty()) { - Out << CsvEscape(run.report_label); - } - Out << ",,"; // for error_occurred and error_message - - // Print user counters - for (const auto& ucn : user_counter_names_) { - auto it = run.counters.find(ucn); - if (it == run.counters.end()) { - Out << ","; - } else { - Out << "," << it->second; - } - } - Out << '\n'; -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/cycleclock.h b/benchmarks/thirdparty/benchmark/src/cycleclock.h deleted file mode 100755 index 179c67cd61..0000000000 --- a/benchmarks/thirdparty/benchmark/src/cycleclock.h +++ /dev/null @@ -1,206 +0,0 @@ -// 
---------------------------------------------------------------------- -// CycleClock -// A CycleClock tells you the current time in Cycles. The "time" -// is actually time since power-on. This is like time() but doesn't -// involve a system call and is much more precise. -// -// NOTE: Not all cpu/platform/kernel combinations guarantee that this -// clock increments at a constant rate or is synchronized across all logical -// cpus in a system. -// -// If you need the above guarantees, please consider using a different -// API. There are efforts to provide an interface which provides a millisecond -// granularity and implemented as a memory read. A memory read is generally -// cheaper than the CycleClock for many architectures. -// -// Also, in some out of order CPU implementations, the CycleClock is not -// serializing. So if you're trying to count at cycles granularity, your -// data might be inaccurate due to out of order instruction execution. -// ---------------------------------------------------------------------- - -#ifndef BENCHMARK_CYCLECLOCK_H_ -#define BENCHMARK_CYCLECLOCK_H_ - -#include - -#include "benchmark/benchmark.h" -#include "internal_macros.h" - -#if defined(BENCHMARK_OS_MACOSX) -#include -#endif -// For MSVC, we want to use '_asm rdtsc' when possible (since it works -// with even ancient MSVC compilers), and when not possible the -// __rdtsc intrinsic, declared in . Unfortunately, in some -// environments, and have conflicting -// declarations of some other intrinsics, breaking compilation. -// Therefore, we simply declare __rdtsc ourselves. See also -// http://connect.microsoft.com/VisualStudio/feedback/details/262047 -#if defined(COMPILER_MSVC) && !defined(_M_IX86) -extern "C" uint64_t __rdtsc(); -#pragma intrinsic(__rdtsc) -#endif - -#if !defined(BENCHMARK_OS_WINDOWS) || defined(BENCHMARK_OS_MINGW) -#include -#include -#endif - -#ifdef BENCHMARK_OS_EMSCRIPTEN -#include -#endif - -namespace benchmark { -// NOTE: only i386 and x86_64 have been well tested. -// PPC, sparc, alpha, and ia64 are based on -// http://peter.kuscsik.com/wordpress/?p=14 -// with modifications by m3b. See also -// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h -namespace cycleclock { -// This should return the number of cycles since power-on. Thread-safe. -inline BENCHMARK_ALWAYS_INLINE int64_t Now() { -#if defined(BENCHMARK_OS_MACOSX) - // this goes at the top because we need ALL Macs, regardless of - // architecture, to return the number of "mach time units" that - // have passed since startup. See sysinfo.cc where - // InitializeSystemInfo() sets the supposed cpu clock frequency of - // macs to the number of mach time units per second, not actual - // CPU clock frequency (which can change in the face of CPU - // frequency scaling). Also note that when the Mac sleeps, this - // counter pauses; it does not continue counting, nor does it - // reset to zero. - return mach_absolute_time(); -#elif defined(BENCHMARK_OS_EMSCRIPTEN) - // this goes above x86-specific code because old versions of Emscripten - // define __x86_64__, although they have nothing to do with it. 
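
Outside a header like this, the usual way to read the x86 time-stamp counter is the compiler intrinsic rather than hand-written assembly. A hedged sketch for GCC/Clang on x86-64 (the caveats above about non-serializing reads still apply):

    #include <cstdint>
    #include <x86intrin.h>  // provides __rdtsc() on GCC and Clang

    inline int64_t ReadTsc() {
      // Non-serializing: the CPU may reorder surrounding work around this read,
      // so add a fence (or use __rdtscp) if cycle-accurate ordering matters.
      return static_cast<int64_t>(__rdtsc());
    }
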
- return static_cast(emscripten_get_now() * 1e+6); -#elif defined(__i386__) - int64_t ret; - __asm__ volatile("rdtsc" : "=A"(ret)); - return ret; -#elif defined(__x86_64__) || defined(__amd64__) - uint64_t low, high; - __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); - return (high << 32) | low; -#elif defined(__powerpc__) || defined(__ppc__) - // This returns a time-base, which is not always precisely a cycle-count. -#if defined(__powerpc64__) || defined(__ppc64__) - int64_t tb; - asm volatile("mfspr %0, 268" : "=r"(tb)); - return tb; -#else - uint32_t tbl, tbu0, tbu1; - asm volatile( - "mftbu %0\n" - "mftbl %1\n" - "mftbu %2" - : "=r"(tbu0), "=r"(tbl), "=r"(tbu1)); - tbl &= -static_cast(tbu0 == tbu1); - // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is no longer needed) - return (static_cast(tbu1) << 32) | tbl; -#endif -#elif defined(__sparc__) - int64_t tick; - asm(".byte 0x83, 0x41, 0x00, 0x00"); - asm("mov %%g1, %0" : "=r"(tick)); - return tick; -#elif defined(__ia64__) - int64_t itc; - asm("mov %0 = ar.itc" : "=r"(itc)); - return itc; -#elif defined(COMPILER_MSVC) && defined(_M_IX86) - // Older MSVC compilers (like 7.x) don't seem to support the - // __rdtsc intrinsic properly, so I prefer to use _asm instead - // when I know it will work. Otherwise, I'll use __rdtsc and hope - // the code is being compiled with a non-ancient compiler. - _asm rdtsc -#elif defined(COMPILER_MSVC) - return __rdtsc(); -#elif defined(BENCHMARK_OS_NACL) - // Native Client validator on x86/x86-64 allows RDTSC instructions, - // and this case is handled above. Native Client validator on ARM - // rejects MRC instructions (used in the ARM-specific sequence below), - // so we handle it here. Portable Native Client compiles to - // architecture-agnostic bytecode, which doesn't provide any - // cycle counter access mnemonics. - - // Native Client does not provide any API to access cycle counter. - // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday - // because is provides nanosecond resolution (which is noticable at - // least for PNaCl modules running on x86 Mac & Linux). - // Initialize to always return 0 if clock_gettime fails. - struct timespec ts = {0, 0}; - clock_gettime(CLOCK_MONOTONIC, &ts); - return static_cast(ts.tv_sec) * 1000000000 + ts.tv_nsec; -#elif defined(__aarch64__) - // System timer of ARMv8 runs at a different frequency than the CPU's. - // The frequency is fixed, typically in the range 1-50MHz. It can be - // read at CNTFRQ special register. We assume the OS has set up - // the virtual timer properly. - int64_t virtual_timer_value; - asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value)); - return virtual_timer_value; -#elif defined(__ARM_ARCH) - // V6 is the earliest arch that has a standard cyclecount - // Native Client validator doesn't allow MRC instructions. -#if (__ARM_ARCH >= 6) - uint32_t pmccntr; - uint32_t pmuseren; - uint32_t pmcntenset; - // Read the user mode perf monitor counter access permissions. - asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren)); - if (pmuseren & 1) { // Allows reading perfmon counters for user mode code. - asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset)); - if (pmcntenset & 0x80000000ul) { // Is it counting? 
- asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr)); - // The counter is set up to count every 64th cycle - return static_cast(pmccntr) * 64; // Should optimize to << 6 - } - } -#endif - struct timeval tv; - gettimeofday(&tv, nullptr); - return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; -#elif defined(__mips__) - // mips apparently only allows rdtsc for superusers, so we fall - // back to gettimeofday. It's possible clock_gettime would be better. - struct timeval tv; - gettimeofday(&tv, nullptr); - return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; -#elif defined(__s390__) // Covers both s390 and s390x. - // Return the CPU clock. - uint64_t tsc; - asm("stck %0" : "=Q"(tsc) : : "cc"); - return tsc; -#elif defined(__riscv) // RISC-V - // Use RDCYCLE (and RDCYCLEH on riscv32) -#if __riscv_xlen == 32 - uint32_t cycles_lo, cycles_hi0, cycles_hi1; - // This asm also includes the PowerPC overflow handling strategy, as above. - // Implemented in assembly because Clang insisted on branching. - asm volatile( - "rdcycleh %0\n" - "rdcycle %1\n" - "rdcycleh %2\n" - "sub %0, %0, %2\n" - "seqz %0, %0\n" - "sub %0, zero, %0\n" - "and %1, %1, %0\n" - : "=r"(cycles_hi0), "=r"(cycles_lo), "=r"(cycles_hi1)); - return (static_cast(cycles_hi1) << 32) | cycles_lo; -#else - uint64_t cycles; - asm volatile("rdcycle %0" : "=r"(cycles)); - return cycles; -#endif -#else -// The soft failover to a generic implementation is automatic only for ARM. -// For other platforms the developer is expected to make an attempt to create -// a fast implementation and use generic version if nothing better is available. -#error You need to define CycleTimer for your OS and CPU -#endif -} -} // end namespace cycleclock -} // end namespace benchmark - -#endif // BENCHMARK_CYCLECLOCK_H_ diff --git a/benchmarks/thirdparty/benchmark/src/internal_macros.h b/benchmarks/thirdparty/benchmark/src/internal_macros.h deleted file mode 100755 index 6adf00d056..0000000000 --- a/benchmarks/thirdparty/benchmark/src/internal_macros.h +++ /dev/null @@ -1,94 +0,0 @@ -#ifndef BENCHMARK_INTERNAL_MACROS_H_ -#define BENCHMARK_INTERNAL_MACROS_H_ - -#include "benchmark/benchmark.h" - -/* Needed to detect STL */ -#include - -// clang-format off - -#ifndef __has_feature -#define __has_feature(x) 0 -#endif - -#if defined(__clang__) - #if !defined(COMPILER_CLANG) - #define COMPILER_CLANG - #endif -#elif defined(_MSC_VER) - #if !defined(COMPILER_MSVC) - #define COMPILER_MSVC - #endif -#elif defined(__GNUC__) - #if !defined(COMPILER_GCC) - #define COMPILER_GCC - #endif -#endif - -#if __has_feature(cxx_attributes) - #define BENCHMARK_NORETURN [[noreturn]] -#elif defined(__GNUC__) - #define BENCHMARK_NORETURN __attribute__((noreturn)) -#elif defined(COMPILER_MSVC) - #define BENCHMARK_NORETURN __declspec(noreturn) -#else - #define BENCHMARK_NORETURN -#endif - -#if defined(__CYGWIN__) - #define BENCHMARK_OS_CYGWIN 1 -#elif defined(_WIN32) - #define BENCHMARK_OS_WINDOWS 1 - #if defined(__MINGW32__) - #define BENCHMARK_OS_MINGW 1 - #endif -#elif defined(__APPLE__) - #define BENCHMARK_OS_APPLE 1 - #include "TargetConditionals.h" - #if defined(TARGET_OS_MAC) - #define BENCHMARK_OS_MACOSX 1 - #if defined(TARGET_OS_IPHONE) - #define BENCHMARK_OS_IOS 1 - #endif - #endif -#elif defined(__FreeBSD__) - #define BENCHMARK_OS_FREEBSD 1 -#elif defined(__NetBSD__) - #define BENCHMARK_OS_NETBSD 1 -#elif defined(__OpenBSD__) - #define BENCHMARK_OS_OPENBSD 1 -#elif defined(__linux__) - #define BENCHMARK_OS_LINUX 1 -#elif defined(__native_client__) - #define 
BENCHMARK_OS_NACL 1 -#elif defined(__EMSCRIPTEN__) - #define BENCHMARK_OS_EMSCRIPTEN 1 -#elif defined(__rtems__) - #define BENCHMARK_OS_RTEMS 1 -#elif defined(__Fuchsia__) -#define BENCHMARK_OS_FUCHSIA 1 -#elif defined (__SVR4) && defined (__sun) -#define BENCHMARK_OS_SOLARIS 1 -#elif defined(__QNX__) -#define BENCHMARK_OS_QNX 1 -#endif - -#if defined(__ANDROID__) && defined(__GLIBCXX__) -#define BENCHMARK_STL_ANDROID_GNUSTL 1 -#endif - -#if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \ - && !defined(__EXCEPTIONS) - #define BENCHMARK_HAS_NO_EXCEPTIONS -#endif - -#if defined(COMPILER_CLANG) || defined(COMPILER_GCC) - #define BENCHMARK_MAYBE_UNUSED __attribute__((unused)) -#else - #define BENCHMARK_MAYBE_UNUSED -#endif - -// clang-format on - -#endif // BENCHMARK_INTERNAL_MACROS_H_ diff --git a/benchmarks/thirdparty/benchmark/src/json_reporter.cc b/benchmarks/thirdparty/benchmark/src/json_reporter.cc deleted file mode 100755 index 959d245a34..0000000000 --- a/benchmarks/thirdparty/benchmark/src/json_reporter.cc +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "benchmark/benchmark.h" -#include "complexity.h" - -#include -#include -#include -#include // for setprecision -#include -#include -#include -#include -#include - -#include "string_util.h" -#include "timers.h" - -namespace benchmark { - -namespace { - -std::string StrEscape(const std::string & s) { - std::string tmp; - tmp.reserve(s.size()); - for (char c : s) { - switch (c) { - case '\b': tmp += "\\b"; break; - case '\f': tmp += "\\f"; break; - case '\n': tmp += "\\n"; break; - case '\r': tmp += "\\r"; break; - case '\t': tmp += "\\t"; break; - case '\\': tmp += "\\\\"; break; - case '"' : tmp += "\\\""; break; - default : tmp += c; break; - } - } - return tmp; -} - -std::string FormatKV(std::string const& key, std::string const& value) { - return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str()); -} - -std::string FormatKV(std::string const& key, const char* value) { - return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str()); -} - -std::string FormatKV(std::string const& key, bool value) { - return StrFormat("\"%s\": %s", StrEscape(key).c_str(), value ? "true" : "false"); -} - -std::string FormatKV(std::string const& key, int64_t value) { - std::stringstream ss; - ss << '"' << StrEscape(key) << "\": " << value; - return ss.str(); -} - -std::string FormatKV(std::string const& key, IterationCount value) { - std::stringstream ss; - ss << '"' << StrEscape(key) << "\": " << value; - return ss.str(); -} - -std::string FormatKV(std::string const& key, double value) { - std::stringstream ss; - ss << '"' << StrEscape(key) << "\": "; - - if (std::isnan(value)) - ss << (value < 0 ? "-" : "") << "NaN"; - else if (std::isinf(value)) - ss << (value < 0 ? 
"-" : "") << "Infinity"; - else { - const auto max_digits10 = - std::numeric_limits::max_digits10; - const auto max_fractional_digits10 = max_digits10 - 1; - ss << std::scientific << std::setprecision(max_fractional_digits10) - << value; - } - return ss.str(); -} - -int64_t RoundDouble(double v) { return std::lround(v); } - -} // end namespace - -bool JSONReporter::ReportContext(const Context& context) { - std::ostream& out = GetOutputStream(); - - out << "{\n"; - std::string inner_indent(2, ' '); - - // Open context block and print context information. - out << inner_indent << "\"context\": {\n"; - std::string indent(4, ' '); - - std::string walltime_value = LocalDateTimeString(); - out << indent << FormatKV("date", walltime_value) << ",\n"; - - out << indent << FormatKV("host_name", context.sys_info.name) << ",\n"; - - if (Context::executable_name) { - out << indent << FormatKV("executable", Context::executable_name) << ",\n"; - } - - CPUInfo const& info = context.cpu_info; - out << indent << FormatKV("num_cpus", static_cast(info.num_cpus)) - << ",\n"; - out << indent - << FormatKV("mhz_per_cpu", - RoundDouble(info.cycles_per_second / 1000000.0)) - << ",\n"; - if (CPUInfo::Scaling::UNKNOWN != info.scaling) { - out << indent << FormatKV("cpu_scaling_enabled", info.scaling == CPUInfo::Scaling::ENABLED ? true : false) - << ",\n"; - } - - out << indent << "\"caches\": [\n"; - indent = std::string(6, ' '); - std::string cache_indent(8, ' '); - for (size_t i = 0; i < info.caches.size(); ++i) { - auto& CI = info.caches[i]; - out << indent << "{\n"; - out << cache_indent << FormatKV("type", CI.type) << ",\n"; - out << cache_indent << FormatKV("level", static_cast(CI.level)) - << ",\n"; - out << cache_indent - << FormatKV("size", static_cast(CI.size)) << ",\n"; - out << cache_indent - << FormatKV("num_sharing", static_cast(CI.num_sharing)) - << "\n"; - out << indent << "}"; - if (i != info.caches.size() - 1) out << ","; - out << "\n"; - } - indent = std::string(4, ' '); - out << indent << "],\n"; - out << indent << "\"load_avg\": ["; - for (auto it = info.load_avg.begin(); it != info.load_avg.end();) { - out << *it++; - if (it != info.load_avg.end()) out << ","; - } - out << "],\n"; - -#if defined(NDEBUG) - const char build_type[] = "release"; -#else - const char build_type[] = "debug"; -#endif - out << indent << FormatKV("library_build_type", build_type) << "\n"; - // Close context block and open the list of benchmarks. - out << inner_indent << "},\n"; - out << inner_indent << "\"benchmarks\": [\n"; - return true; -} - -void JSONReporter::ReportRuns(std::vector const& reports) { - if (reports.empty()) { - return; - } - std::string indent(4, ' '); - std::ostream& out = GetOutputStream(); - if (!first_report_) { - out << ",\n"; - } - first_report_ = false; - - for (auto it = reports.begin(); it != reports.end(); ++it) { - out << indent << "{\n"; - PrintRunData(*it); - out << indent << '}'; - auto it_cp = it; - if (++it_cp != reports.end()) { - out << ",\n"; - } - } -} - -void JSONReporter::Finalize() { - // Close the list of benchmarks and the top level object. 
- GetOutputStream() << "\n ]\n}\n"; -} - -void JSONReporter::PrintRunData(Run const& run) { - std::string indent(6, ' '); - std::ostream& out = GetOutputStream(); - out << indent << FormatKV("name", run.benchmark_name()) << ",\n"; - out << indent << FormatKV("run_name", run.run_name.str()) << ",\n"; - out << indent << FormatKV("run_type", [&run]() -> const char* { - switch (run.run_type) { - case BenchmarkReporter::Run::RT_Iteration: - return "iteration"; - case BenchmarkReporter::Run::RT_Aggregate: - return "aggregate"; - } - BENCHMARK_UNREACHABLE(); - }()) << ",\n"; - out << indent << FormatKV("repetitions", run.repetitions) << ",\n"; - if (run.run_type != BenchmarkReporter::Run::RT_Aggregate) { - out << indent << FormatKV("repetition_index", run.repetition_index) - << ",\n"; - } - out << indent << FormatKV("threads", run.threads) << ",\n"; - if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) { - out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n"; - } - if (run.error_occurred) { - out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n"; - out << indent << FormatKV("error_message", run.error_message) << ",\n"; - } - if (!run.report_big_o && !run.report_rms) { - out << indent << FormatKV("iterations", run.iterations) << ",\n"; - out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n"; - out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime()); - out << ",\n" - << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); - } else if (run.report_big_o) { - out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime()) - << ",\n"; - out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime()) - << ",\n"; - out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n"; - out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); - } else if (run.report_rms) { - out << indent << FormatKV("rms", run.GetAdjustedCPUTime()); - } - - for (auto& c : run.counters) { - out << ",\n" << indent << FormatKV(c.first, c.second); - } - - if (run.has_memory_result) { - out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter); - out << ",\n" << indent << FormatKV("max_bytes_used", run.max_bytes_used); - } - - if (!run.report_label.empty()) { - out << ",\n" << indent << FormatKV("label", run.report_label); - } - out << '\n'; -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/log.h b/benchmarks/thirdparty/benchmark/src/log.h deleted file mode 100755 index 47d0c35c01..0000000000 --- a/benchmarks/thirdparty/benchmark/src/log.h +++ /dev/null @@ -1,74 +0,0 @@ -#ifndef BENCHMARK_LOG_H_ -#define BENCHMARK_LOG_H_ - -#include -#include - -#include "benchmark/benchmark.h" - -namespace benchmark { -namespace internal { - -typedef std::basic_ostream&(EndLType)(std::basic_ostream&); - -class LogType { - friend LogType& GetNullLogInstance(); - friend LogType& GetErrorLogInstance(); - - // FIXME: Add locking to output. 
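
The double overload of FormatKV in json_reporter.cc above dodges two JSON pitfalls: NaN/Infinity are not legal JSON numbers (they are emitted as bare tokens, an extension many parsers accept), and a double needs max_digits10 significant digits to round-trip exactly. A self-contained sketch of just the numeric formatting (illustrative name):

    #include <cmath>
    #include <iomanip>
    #include <limits>
    #include <sstream>
    #include <string>

    std::string JsonNumber(double v) {
      std::ostringstream ss;
      if (std::isnan(v)) {
        ss << "NaN";  // not strict JSON; emitted as an extension, as above
      } else if (std::isinf(v)) {
        ss << (v < 0 ? "-" : "") << "Infinity";
      } else {
        // In scientific notation, precision counts fractional digits, so
        // max_digits10 - 1 fractional digits yield max_digits10 significant
        // ones -- enough for the text to parse back to the identical double.
        ss << std::scientific
           << std::setprecision(std::numeric_limits<double>::max_digits10 - 1)
           << v;
      }
      return ss.str();
    }
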
- template - friend LogType& operator<<(LogType&, Tp const&); - friend LogType& operator<<(LogType&, EndLType*); - - private: - LogType(std::ostream* out) : out_(out) {} - std::ostream* out_; - BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType); -}; - -template -LogType& operator<<(LogType& log, Tp const& value) { - if (log.out_) { - *log.out_ << value; - } - return log; -} - -inline LogType& operator<<(LogType& log, EndLType* m) { - if (log.out_) { - *log.out_ << m; - } - return log; -} - -inline int& LogLevel() { - static int log_level = 0; - return log_level; -} - -inline LogType& GetNullLogInstance() { - static LogType log(nullptr); - return log; -} - -inline LogType& GetErrorLogInstance() { - static LogType log(&std::clog); - return log; -} - -inline LogType& GetLogInstanceForLevel(int level) { - if (level <= LogLevel()) { - return GetErrorLogInstance(); - } - return GetNullLogInstance(); -} - -} // end namespace internal -} // end namespace benchmark - -// clang-format off -#define VLOG(x) \ - (::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \ - " ") -// clang-format on -#endif diff --git a/benchmarks/thirdparty/benchmark/src/mutex.h b/benchmarks/thirdparty/benchmark/src/mutex.h deleted file mode 100755 index 3fac79aea4..0000000000 --- a/benchmarks/thirdparty/benchmark/src/mutex.h +++ /dev/null @@ -1,155 +0,0 @@ -#ifndef BENCHMARK_MUTEX_H_ -#define BENCHMARK_MUTEX_H_ - -#include -#include - -#include "check.h" - -// Enable thread safety attributes only with clang. -// The attributes can be safely erased when compiling with other compilers. -#if defined(HAVE_THREAD_SAFETY_ATTRIBUTES) -#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) -#else -#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op -#endif - -#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x)) - -#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) - -#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) - -#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) - -#define ACQUIRED_BEFORE(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) - -#define ACQUIRED_AFTER(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) - -#define REQUIRES(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__)) - -#define REQUIRES_SHARED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__)) - -#define ACQUIRE(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__)) - -#define ACQUIRE_SHARED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__)) - -#define RELEASE(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__)) - -#define RELEASE_SHARED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__)) - -#define TRY_ACQUIRE(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__)) - -#define TRY_ACQUIRE_SHARED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__)) - -#define EXCLUDES(...) 
THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) - -#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x)) - -#define ASSERT_SHARED_CAPABILITY(x) \ - THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x)) - -#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) - -#define NO_THREAD_SAFETY_ANALYSIS \ - THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) - -namespace benchmark { - -typedef std::condition_variable Condition; - -// NOTE: Wrappers for std::mutex and std::unique_lock are provided so that -// we can annotate them with thread safety attributes and use the -// -Wthread-safety warning with clang. The standard library types cannot be -// used directly because they do not provide the required annotations. -class CAPABILITY("mutex") Mutex { - public: - Mutex() {} - - void lock() ACQUIRE() { mut_.lock(); } - void unlock() RELEASE() { mut_.unlock(); } - std::mutex& native_handle() { return mut_; } - - private: - std::mutex mut_; -}; - -class SCOPED_CAPABILITY MutexLock { - typedef std::unique_lock MutexLockImp; - - public: - MutexLock(Mutex& m) ACQUIRE(m) : ml_(m.native_handle()) {} - ~MutexLock() RELEASE() {} - MutexLockImp& native_handle() { return ml_; } - - private: - MutexLockImp ml_; -}; - -class Barrier { - public: - Barrier(int num_threads) : running_threads_(num_threads) {} - - // Called by each thread - bool wait() EXCLUDES(lock_) { - bool last_thread = false; - { - MutexLock ml(lock_); - last_thread = createBarrier(ml); - } - if (last_thread) phase_condition_.notify_all(); - return last_thread; - } - - void removeThread() EXCLUDES(lock_) { - MutexLock ml(lock_); - --running_threads_; - if (entered_ != 0) phase_condition_.notify_all(); - } - - private: - Mutex lock_; - Condition phase_condition_; - int running_threads_; - - // State for barrier management - int phase_number_ = 0; - int entered_ = 0; // Number of threads that have entered this barrier - - // Enter the barrier and wait until all other threads have also - // entered the barrier. Returns iff this is the last thread to - // enter the barrier. - bool createBarrier(MutexLock& ml) REQUIRES(lock_) { - CHECK_LT(entered_, running_threads_); - entered_++; - if (entered_ < running_threads_) { - // Wait for all threads to enter - int phase_number_cp = phase_number_; - auto cb = [this, phase_number_cp]() { - return this->phase_number_ > phase_number_cp || - entered_ == running_threads_; // A thread has aborted in error - }; - phase_condition_.wait(ml.native_handle(), cb); - if (phase_number_ > phase_number_cp) return false; - // else (running_threads_ == entered_) and we are the last thread. - } - // Last thread has reached the barrier - phase_number_++; - entered_ = 0; - return true; - } -}; - -} // end namespace benchmark - -#endif // BENCHMARK_MUTEX_H_ diff --git a/benchmarks/thirdparty/benchmark/src/re.h b/benchmarks/thirdparty/benchmark/src/re.h deleted file mode 100755 index fbe25037b4..0000000000 --- a/benchmarks/thirdparty/benchmark/src/re.h +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
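
The Barrier in mutex.h above is a phase-counting barrier: wait() blocks until the last active thread arrives and returns true in exactly one thread per phase, which is handy for running a serial section between parallel phases. A hedged usage sketch (not from the library's own tests):

    #include <functional>
    #include <thread>
    #include <vector>

    void Worker(benchmark::Barrier& barrier) {
      // ... parallel phase ...
      if (barrier.wait()) {
        // exactly one thread (the last to arrive) runs this serial section
      }
      // all threads resume here once the phase number advances
    }

    int main() {
      const int kThreads = 4;
      benchmark::Barrier barrier(kThreads);
      std::vector<std::thread> pool;
      for (int i = 0; i < kThreads; ++i)
        pool.emplace_back(Worker, std::ref(barrier));
      for (auto& t : pool) t.join();
    }
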
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef BENCHMARK_RE_H_ -#define BENCHMARK_RE_H_ - -#include "internal_macros.h" - -// clang-format off - -#if !defined(HAVE_STD_REGEX) && \ - !defined(HAVE_GNU_POSIX_REGEX) && \ - !defined(HAVE_POSIX_REGEX) - // No explicit regex selection; detect based on builtin hints. - #if defined(BENCHMARK_OS_LINUX) || defined(BENCHMARK_OS_APPLE) - #define HAVE_POSIX_REGEX 1 - #elif __cplusplus >= 199711L - #define HAVE_STD_REGEX 1 - #endif -#endif - -// Prefer C regex libraries when compiling w/o exceptions so that we can -// correctly report errors. -#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && \ - defined(BENCHMARK_HAVE_STD_REGEX) && \ - (defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX)) - #undef HAVE_STD_REGEX -#endif - -#if defined(HAVE_STD_REGEX) - #include -#elif defined(HAVE_GNU_POSIX_REGEX) - #include -#elif defined(HAVE_POSIX_REGEX) - #include -#else -#error No regular expression backend was found! -#endif - -// clang-format on - -#include - -#include "check.h" - -namespace benchmark { - -// A wrapper around the POSIX regular expression API that provides automatic -// cleanup -class Regex { - public: - Regex() : init_(false) {} - - ~Regex(); - - // Compile a regular expression matcher from spec. Returns true on success. - // - // On failure (and if error is not nullptr), error is populated with a human - // readable error message if an error occurs. - bool Init(const std::string& spec, std::string* error); - - // Returns whether str matches the compiled regular expression. - bool Match(const std::string& str); - - private: - bool init_; -// Underlying regular expression object -#if defined(HAVE_STD_REGEX) - std::regex re_; -#elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX) - regex_t re_; -#else -#error No regular expression backend implementation available -#endif -}; - -#if defined(HAVE_STD_REGEX) - -inline bool Regex::Init(const std::string& spec, std::string* error) { -#ifdef BENCHMARK_HAS_NO_EXCEPTIONS - ((void)error); // suppress unused warning -#else - try { -#endif - re_ = std::regex(spec, std::regex_constants::extended); - init_ = true; -#ifndef BENCHMARK_HAS_NO_EXCEPTIONS -} -catch (const std::regex_error& e) { - if (error) { - *error = e.what(); - } -} -#endif -return init_; -} - -inline Regex::~Regex() {} - -inline bool Regex::Match(const std::string& str) { - if (!init_) { - return false; - } - return std::regex_search(str, re_); -} - -#else -inline bool Regex::Init(const std::string& spec, std::string* error) { - int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB); - if (ec != 0) { - if (error) { - size_t needed = regerror(ec, &re_, nullptr, 0); - char* errbuf = new char[needed]; - regerror(ec, &re_, errbuf, needed); - - // regerror returns the number of bytes necessary to null terminate - // the string, so we move that when assigning to error. 
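
Whichever backend re.h selects below, callers see the same two-call surface. A small hedged example (pattern and input are made up):

    benchmark::Regex re;
    std::string err;
    if (!re.Init("BM_vector_.*", &err)) {
      // err carries the message from std::regex_error or regerror(), per backend
    } else if (re.Match("BM_vector_push_back/1024")) {
      // patterns use POSIX-extended syntax on every backend
    }
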
- CHECK_NE(needed, 0); - error->assign(errbuf, needed - 1); - - delete[] errbuf; - } - - return false; - } - - init_ = true; - return true; -} - -inline Regex::~Regex() { - if (init_) { - regfree(&re_); - } -} - -inline bool Regex::Match(const std::string& str) { - if (!init_) { - return false; - } - return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0; -} -#endif - -} // end namespace benchmark - -#endif // BENCHMARK_RE_H_ diff --git a/benchmarks/thirdparty/benchmark/src/reporter.cc b/benchmarks/thirdparty/benchmark/src/reporter.cc deleted file mode 100755 index 337575a118..0000000000 --- a/benchmarks/thirdparty/benchmark/src/reporter.cc +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "benchmark/benchmark.h" -#include "timers.h" - -#include - -#include -#include -#include - -#include "check.h" -#include "string_util.h" - -namespace benchmark { - -BenchmarkReporter::BenchmarkReporter() - : output_stream_(&std::cout), error_stream_(&std::cerr) {} - -BenchmarkReporter::~BenchmarkReporter() {} - -void BenchmarkReporter::PrintBasicContext(std::ostream *out, - Context const &context) { - CHECK(out) << "cannot be null"; - auto &Out = *out; - - Out << LocalDateTimeString() << "\n"; - - if (context.executable_name) - Out << "Running " << context.executable_name << "\n"; - - const CPUInfo &info = context.cpu_info; - Out << "Run on (" << info.num_cpus << " X " - << (info.cycles_per_second / 1000000.0) << " MHz CPU " - << ((info.num_cpus > 1) ? "s" : "") << ")\n"; - if (info.caches.size() != 0) { - Out << "CPU Caches:\n"; - for (auto &CInfo : info.caches) { - Out << " L" << CInfo.level << " " << CInfo.type << " " - << (CInfo.size / 1024) << " KiB"; - if (CInfo.num_sharing != 0) - Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")"; - Out << "\n"; - } - } - if (!info.load_avg.empty()) { - Out << "Load Average: "; - for (auto It = info.load_avg.begin(); It != info.load_avg.end();) { - Out << StrFormat("%.2f", *It++); - if (It != info.load_avg.end()) Out << ", "; - } - Out << "\n"; - } - - if (CPUInfo::Scaling::ENABLED == info.scaling) { - Out << "***WARNING*** CPU scaling is enabled, the benchmark " - "real time measurements may be noisy and will incur extra " - "overhead.\n"; - } - -#ifndef NDEBUG - Out << "***WARNING*** Library was built as DEBUG. Timings may be " - "affected.\n"; -#endif -} - -// No initializer because it's already initialized to NULL. 
-const char *BenchmarkReporter::Context::executable_name; - -BenchmarkReporter::Context::Context() - : cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get()) {} - -std::string BenchmarkReporter::Run::benchmark_name() const { - std::string name = run_name.str(); - if (run_type == RT_Aggregate) { - name += "_" + aggregate_name; - } - return name; -} - -double BenchmarkReporter::Run::GetAdjustedRealTime() const { - double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit); - if (iterations != 0) new_time /= static_cast(iterations); - return new_time; -} - -double BenchmarkReporter::Run::GetAdjustedCPUTime() const { - double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit); - if (iterations != 0) new_time /= static_cast(iterations); - return new_time; -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/sleep.cc b/benchmarks/thirdparty/benchmark/src/sleep.cc deleted file mode 100755 index 1512ac90f7..0000000000 --- a/benchmarks/thirdparty/benchmark/src/sleep.cc +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "sleep.h" - -#include -#include -#include - -#include "internal_macros.h" - -#ifdef BENCHMARK_OS_WINDOWS -#include -#endif - -namespace benchmark { -#ifdef BENCHMARK_OS_WINDOWS -// Window's Sleep takes milliseconds argument. -void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); } -void SleepForSeconds(double seconds) { - SleepForMilliseconds(static_cast(kNumMillisPerSecond * seconds)); -} -#else // BENCHMARK_OS_WINDOWS -void SleepForMicroseconds(int microseconds) { - struct timespec sleep_time; - sleep_time.tv_sec = microseconds / kNumMicrosPerSecond; - sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro; - while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) - ; // Ignore signals and wait for the full interval to elapse. 
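
Worth pinning down from reporter.cc above: the reported per-iteration time is accumulated time, times the time-unit multiplier, divided by iterations. A tiny sketch with assumed numbers:

    // Mirrors Run::GetAdjustedRealTime()/GetAdjustedCPUTime() above.
    double AdjustedTime(double accumulated_seconds, double unit_multiplier,
                        int64_t iterations) {
      double t = accumulated_seconds * unit_multiplier;  // e.g. 1e6 for microseconds
      if (iterations != 0) t /= static_cast<double>(iterations);
      return t;
    }
    // AdjustedTime(0.002, 1e6, 1000) == 2.0   -> 2 us per iteration
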
-} - -void SleepForMilliseconds(int milliseconds) { - SleepForMicroseconds(milliseconds * kNumMicrosPerMilli); -} - -void SleepForSeconds(double seconds) { - SleepForMicroseconds(static_cast(seconds * kNumMicrosPerSecond)); -} -#endif // BENCHMARK_OS_WINDOWS -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/sleep.h b/benchmarks/thirdparty/benchmark/src/sleep.h deleted file mode 100755 index f98551afe2..0000000000 --- a/benchmarks/thirdparty/benchmark/src/sleep.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef BENCHMARK_SLEEP_H_ -#define BENCHMARK_SLEEP_H_ - -namespace benchmark { -const int kNumMillisPerSecond = 1000; -const int kNumMicrosPerMilli = 1000; -const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000; -const int kNumNanosPerMicro = 1000; -const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond; - -void SleepForMilliseconds(int milliseconds); -void SleepForSeconds(double seconds); -} // end namespace benchmark - -#endif // BENCHMARK_SLEEP_H_ diff --git a/benchmarks/thirdparty/benchmark/src/statistics.cc b/benchmarks/thirdparty/benchmark/src/statistics.cc deleted file mode 100755 index bd5a3d6597..0000000000 --- a/benchmarks/thirdparty/benchmark/src/statistics.cc +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2016 Ismael Jimenez Martinez. All rights reserved. -// Copyright 2017 Roman Lebedev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "benchmark/benchmark.h" - -#include -#include -#include -#include -#include -#include "check.h" -#include "statistics.h" - -namespace benchmark { - -auto StatisticsSum = [](const std::vector& v) { - return std::accumulate(v.begin(), v.end(), 0.0); -}; - -double StatisticsMean(const std::vector& v) { - if (v.empty()) return 0.0; - return StatisticsSum(v) * (1.0 / v.size()); -} - -double StatisticsMedian(const std::vector& v) { - if (v.size() < 3) return StatisticsMean(v); - std::vector copy(v); - - auto center = copy.begin() + v.size() / 2; - std::nth_element(copy.begin(), center, copy.end()); - - // did we have an odd number of samples? 
- // if yes, then center is the median - // it no, then we are looking for the average between center and the value - // before - if (v.size() % 2 == 1) return *center; - auto center2 = copy.begin() + v.size() / 2 - 1; - std::nth_element(copy.begin(), center2, copy.end()); - return (*center + *center2) / 2.0; -} - -// Return the sum of the squares of this sample set -auto SumSquares = [](const std::vector& v) { - return std::inner_product(v.begin(), v.end(), v.begin(), 0.0); -}; - -auto Sqr = [](const double dat) { return dat * dat; }; -auto Sqrt = [](const double dat) { - // Avoid NaN due to imprecision in the calculations - if (dat < 0.0) return 0.0; - return std::sqrt(dat); -}; - -double StatisticsStdDev(const std::vector& v) { - const auto mean = StatisticsMean(v); - if (v.empty()) return mean; - - // Sample standard deviation is undefined for n = 1 - if (v.size() == 1) return 0.0; - - const double avg_squares = SumSquares(v) * (1.0 / v.size()); - return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean))); -} - -std::vector ComputeStats( - const std::vector& reports) { - typedef BenchmarkReporter::Run Run; - std::vector results; - - auto error_count = - std::count_if(reports.begin(), reports.end(), - [](Run const& run) { return run.error_occurred; }); - - if (reports.size() - error_count < 2) { - // We don't report aggregated data if there was a single run. - return results; - } - - // Accumulators. - std::vector real_accumulated_time_stat; - std::vector cpu_accumulated_time_stat; - - real_accumulated_time_stat.reserve(reports.size()); - cpu_accumulated_time_stat.reserve(reports.size()); - - // All repetitions should be run with the same number of iterations so we - // can take this information from the first benchmark. - const IterationCount run_iterations = reports.front().iterations; - // create stats for user counters - struct CounterStat { - Counter c; - std::vector s; - }; - std::map counter_stats; - for (Run const& r : reports) { - for (auto const& cnt : r.counters) { - auto it = counter_stats.find(cnt.first); - if (it == counter_stats.end()) { - counter_stats.insert({cnt.first, {cnt.second, std::vector{}}}); - it = counter_stats.find(cnt.first); - it->second.s.reserve(reports.size()); - } else { - CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags); - } - } - } - - // Populate the accumulators. - for (Run const& run : reports) { - CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name()); - CHECK_EQ(run_iterations, run.iterations); - if (run.error_occurred) continue; - real_accumulated_time_stat.emplace_back(run.real_accumulated_time); - cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time); - // user counters - for (auto const& cnt : run.counters) { - auto it = counter_stats.find(cnt.first); - CHECK_NE(it, counter_stats.end()); - it->second.s.emplace_back(cnt.second); - } - } - - // Only add label if it is same for all runs - std::string report_label = reports[0].report_label; - for (std::size_t i = 1; i < reports.size(); i++) { - if (reports[i].report_label != report_label) { - report_label = ""; - break; - } - } - - const double iteration_rescale_factor = - double(reports.size()) / double(run_iterations); - - for (const auto& Stat : *reports[0].statistics) { - // Get the data from the accumulator to BenchmarkReporter::Run's. 
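
The iteration_rescale_factor defined just above (and applied a few lines below) is the subtle step: each repetition's accumulated time is a sum over M iterations, but the aggregate is reported with iterations = N (one per repetition), so the stored sums are pre-scaled by N/M to survive the later divide-by-iterations. Worked numbers (illustrative):

    // N = 3 repetitions, M = 1000 iterations each, 2.0 s accumulated per repetition:
    //   mean accumulated time              = 2.0 s
    //   rescaled by N/M = 3/1000           = 0.006 s
    //   report divides by iterations = N   = 0.006 / 3 = 0.002 s = 2 ms/iteration,
    // matching 2.0 s / 1000 iterations from any single repetition.
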
- Run data; - data.run_name = reports[0].run_name; - data.run_type = BenchmarkReporter::Run::RT_Aggregate; - data.threads = reports[0].threads; - data.repetitions = reports[0].repetitions; - data.repetition_index = Run::no_repetition_index; - data.aggregate_name = Stat.name_; - data.report_label = report_label; - - // It is incorrect to say that an aggregate is computed over - // run's iterations, because those iterations already got averaged. - // Similarly, if there are N repetitions with 1 iterations each, - // an aggregate will be computed over N measurements, not 1. - // Thus it is best to simply use the count of separate reports. - data.iterations = reports.size(); - - data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat); - data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat); - - // We will divide these times by data.iterations when reporting, but the - // data.iterations is not nessesairly the scale of these measurements, - // because in each repetition, these timers are sum over all the iterations. - // And if we want to say that the stats are over N repetitions and not - // M iterations, we need to multiply these by (N/M). - data.real_accumulated_time *= iteration_rescale_factor; - data.cpu_accumulated_time *= iteration_rescale_factor; - - data.time_unit = reports[0].time_unit; - - // user counters - for (auto const& kv : counter_stats) { - // Do *NOT* rescale the custom counters. They are already properly scaled. - const auto uc_stat = Stat.compute_(kv.second.s); - auto c = Counter(uc_stat, counter_stats[kv.first].c.flags, - counter_stats[kv.first].c.oneK); - data.counters[kv.first] = c; - } - - results.push_back(data); - } - - return results; -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/statistics.h b/benchmarks/thirdparty/benchmark/src/statistics.h deleted file mode 100755 index 7eccc85536..0000000000 --- a/benchmarks/thirdparty/benchmark/src/statistics.h +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 Ismael Jimenez Martinez. All rights reserved. -// Copyright 2017 Roman Lebedev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef STATISTICS_H_ -#define STATISTICS_H_ - -#include - -#include "benchmark/benchmark.h" - -namespace benchmark { - -// Return a vector containing the mean, median and standard devation information -// (and any user-specified info) for the specified list of reports. 
If 'reports' -// contains less than two non-errored runs an empty vector is returned -std::vector ComputeStats( - const std::vector& reports); - -double StatisticsMean(const std::vector& v); -double StatisticsMedian(const std::vector& v); -double StatisticsStdDev(const std::vector& v); - -} // end namespace benchmark - -#endif // STATISTICS_H_ diff --git a/benchmarks/thirdparty/benchmark/src/string_util.cc b/benchmarks/thirdparty/benchmark/src/string_util.cc deleted file mode 100755 index ac60b5588f..0000000000 --- a/benchmarks/thirdparty/benchmark/src/string_util.cc +++ /dev/null @@ -1,255 +0,0 @@ -#include "string_util.h" - -#include -#ifdef BENCHMARK_STL_ANDROID_GNUSTL -#include -#endif -#include -#include -#include -#include -#include - -#include "arraysize.h" - -namespace benchmark { -namespace { - -// kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta. -const char kBigSIUnits[] = "kMGTPEZY"; -// Kibi, Mebi, Gibi, Tebi, Pebi, Exbi, Zebi, Yobi. -const char kBigIECUnits[] = "KMGTPEZY"; -// milli, micro, nano, pico, femto, atto, zepto, yocto. -const char kSmallSIUnits[] = "munpfazy"; - -// We require that all three arrays have the same size. -static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits), - "SI and IEC unit arrays must be the same size"); -static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits), - "Small SI and Big SI unit arrays must be the same size"); - -static const int64_t kUnitsSize = arraysize(kBigSIUnits); - -void ToExponentAndMantissa(double val, double thresh, int precision, - double one_k, std::string* mantissa, - int64_t* exponent) { - std::stringstream mantissa_stream; - - if (val < 0) { - mantissa_stream << "-"; - val = -val; - } - - // Adjust threshold so that it never excludes things which can't be rendered - // in 'precision' digits. - const double adjusted_threshold = - std::max(thresh, 1.0 / std::pow(10.0, precision)); - const double big_threshold = adjusted_threshold * one_k; - const double small_threshold = adjusted_threshold; - // Values in ]simple_threshold,small_threshold[ will be printed as-is - const double simple_threshold = 0.01; - - if (val > big_threshold) { - // Positive powers - double scaled = val; - for (size_t i = 0; i < arraysize(kBigSIUnits); ++i) { - scaled /= one_k; - if (scaled <= big_threshold) { - mantissa_stream << scaled; - *exponent = i + 1; - *mantissa = mantissa_stream.str(); - return; - } - } - mantissa_stream << val; - *exponent = 0; - } else if (val < small_threshold) { - // Negative powers - if (val < simple_threshold) { - double scaled = val; - for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i) { - scaled *= one_k; - if (scaled >= small_threshold) { - mantissa_stream << scaled; - *exponent = -static_cast(i + 1); - *mantissa = mantissa_stream.str(); - return; - } - } - } - mantissa_stream << val; - *exponent = 0; - } else { - mantissa_stream << val; - *exponent = 0; - } - *mantissa = mantissa_stream.str(); -} - -std::string ExponentToPrefix(int64_t exponent, bool iec) { - if (exponent == 0) return ""; - - const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1); - if (index >= kUnitsSize) return ""; - - const char* array = - (exponent > 0 ? (iec ? 
kBigIECUnits : kBigSIUnits) : kSmallSIUnits); - if (iec) - return array[index] + std::string("i"); - else - return std::string(1, array[index]); -} - -std::string ToBinaryStringFullySpecified(double value, double threshold, - int precision, double one_k = 1024.0) { - std::string mantissa; - int64_t exponent; - ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa, - &exponent); - return mantissa + ExponentToPrefix(exponent, false); -} - -} // end namespace - -void AppendHumanReadable(int n, std::string* str) { - std::stringstream ss; - // Round down to the nearest SI prefix. - ss << ToBinaryStringFullySpecified(n, 1.0, 0); - *str += ss.str(); -} - -std::string HumanReadableNumber(double n, double one_k) { - // 1.1 means that figures up to 1.1k should be shown with the next unit down; - // this softens edge effects. - // 1 means that we should show one decimal place of precision. - return ToBinaryStringFullySpecified(n, 1.1, 1, one_k); -} - -std::string StrFormatImp(const char* msg, va_list args) { - // we might need a second shot at this, so pre-emptivly make a copy - va_list args_cp; - va_copy(args_cp, args); - - // TODO(ericwf): use std::array for first attempt to avoid one memory - // allocation guess what the size might be - std::array local_buff; - std::size_t size = local_buff.size(); - // 2015-10-08: vsnprintf is used instead of snd::vsnprintf due to a limitation - // in the android-ndk - auto ret = vsnprintf(local_buff.data(), size, msg, args_cp); - - va_end(args_cp); - - // handle empty expansion - if (ret == 0) return std::string{}; - if (static_cast(ret) < size) - return std::string(local_buff.data()); - - // we did not provide a long enough buffer on our first attempt. - // add 1 to size to account for null-byte in size cast to prevent overflow - size = static_cast(ret) + 1; - auto buff_ptr = std::unique_ptr(new char[size]); - // 2015-10-08: vsnprintf is used instead of snd::vsnprintf due to a limitation - // in the android-ndk - ret = vsnprintf(buff_ptr.get(), size, msg, args); - return std::string(buff_ptr.get()); -} - -std::string StrFormat(const char* format, ...) { - va_list args; - va_start(args, format); - std::string tmp = StrFormatImp(format, args); - va_end(args); - return tmp; -} - -#ifdef BENCHMARK_STL_ANDROID_GNUSTL -/* - * GNU STL in Android NDK lacks support for some C++11 functions, including - * stoul, stoi, stod. We reimplement them here using C functions strtoul, - * strtol, strtod. Note that reimplemented functions are in benchmark:: - * namespace, not std:: namespace. 
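StrFormatImp above is the classic two-pass vsnprintf idiom: format into a small stack buffer first, and only when the reported length does not fit allocate exactly ret + 1 bytes and format again. A condensed standalone version of the same pattern (Format is a hypothetical name):

#include <array>
#include <cstdarg>
#include <cstddef>
#include <cstdio>
#include <memory>
#include <string>

std::string Format(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);

  // First pass into a stack buffer; vsnprintf consumes a va_list, so the
  // retry path needs its own copy taken up front.
  va_list args_copy;
  va_copy(args_copy, args);
  std::array<char, 256> local;
  const int ret = vsnprintf(local.data(), local.size(), fmt, args_copy);
  va_end(args_copy);

  std::string out;
  if (ret < 0) {
    out.clear();  // encoding error; production code might throw instead
  } else if (static_cast<std::size_t>(ret) < local.size()) {
    out.assign(local.data(), ret);  // it fit on the first try
  } else {
    // ret is the required length excluding the terminator, so ret + 1
    // bytes are exactly enough for the second pass.
    std::unique_ptr<char[]> big(new char[ret + 1]);
    vsnprintf(big.get(), static_cast<std::size_t>(ret) + 1, fmt, args);
    out.assign(big.get(), ret);
  }
  va_end(args);
  return out;
}

The pattern relies on C99 semantics: vsnprintf returns the length the full output would have had even when it truncates, which is what makes the exact second allocation possible.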
- */ -unsigned long stoul(const std::string& str, size_t* pos, int base) { - /* Record previous errno */ - const int oldErrno = errno; - errno = 0; - - const char* strStart = str.c_str(); - char* strEnd = const_cast(strStart); - const unsigned long result = strtoul(strStart, &strEnd, base); - - const int strtoulErrno = errno; - /* Restore previous errno */ - errno = oldErrno; - - /* Check for errors and return */ - if (strtoulErrno == ERANGE) { - throw std::out_of_range( - "stoul failed: " + str + " is outside of range of unsigned long"); - } else if (strEnd == strStart || strtoulErrno != 0) { - throw std::invalid_argument( - "stoul failed: " + str + " is not an integer"); - } - if (pos != nullptr) { - *pos = static_cast(strEnd - strStart); - } - return result; -} - -int stoi(const std::string& str, size_t* pos, int base) { - /* Record previous errno */ - const int oldErrno = errno; - errno = 0; - - const char* strStart = str.c_str(); - char* strEnd = const_cast(strStart); - const long result = strtol(strStart, &strEnd, base); - - const int strtolErrno = errno; - /* Restore previous errno */ - errno = oldErrno; - - /* Check for errors and return */ - if (strtolErrno == ERANGE || long(int(result)) != result) { - throw std::out_of_range( - "stoul failed: " + str + " is outside of range of int"); - } else if (strEnd == strStart || strtolErrno != 0) { - throw std::invalid_argument( - "stoul failed: " + str + " is not an integer"); - } - if (pos != nullptr) { - *pos = static_cast(strEnd - strStart); - } - return int(result); -} - -double stod(const std::string& str, size_t* pos) { - /* Record previous errno */ - const int oldErrno = errno; - errno = 0; - - const char* strStart = str.c_str(); - char* strEnd = const_cast(strStart); - const double result = strtod(strStart, &strEnd); - - /* Restore previous errno */ - const int strtodErrno = errno; - errno = oldErrno; - - /* Check for errors and return */ - if (strtodErrno == ERANGE) { - throw std::out_of_range( - "stoul failed: " + str + " is outside of range of int"); - } else if (strEnd == strStart || strtodErrno != 0) { - throw std::invalid_argument( - "stoul failed: " + str + " is not an integer"); - } - if (pos != nullptr) { - *pos = static_cast(strEnd - strStart); - } - return result; -} -#endif - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/string_util.h b/benchmarks/thirdparty/benchmark/src/string_util.h deleted file mode 100755 index 09d7b4bd2a..0000000000 --- a/benchmarks/thirdparty/benchmark/src/string_util.h +++ /dev/null @@ -1,59 +0,0 @@ -#ifndef BENCHMARK_STRING_UTIL_H_ -#define BENCHMARK_STRING_UTIL_H_ - -#include -#include -#include -#include "internal_macros.h" - -namespace benchmark { - -void AppendHumanReadable(int n, std::string* str); - -std::string HumanReadableNumber(double n, double one_k = 1024.0); - -#if defined(__MINGW32__) -__attribute__((format(__MINGW_PRINTF_FORMAT, 1, 2))) -#elif defined(__GNUC__) -__attribute__((format(printf, 1, 2))) -#endif -std::string -StrFormat(const char* format, ...); - -inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT { - return out; -} - -template -inline std::ostream& StrCatImp(std::ostream& out, First&& f, Rest&&... rest) { - out << std::forward(f); - return StrCatImp(out, std::forward(rest)...); -} - -template -inline std::string StrCat(Args&&... 
args) { - std::ostringstream ss; - StrCatImp(ss, std::forward(args)...); - return ss.str(); -} - -#ifdef BENCHMARK_STL_ANDROID_GNUSTL -/* - * GNU STL in Android NDK lacks support for some C++11 functions, including - * stoul, stoi, stod. We reimplement them here using C functions strtoul, - * strtol, strtod. Note that reimplemented functions are in benchmark:: - * namespace, not std:: namespace. - */ -unsigned long stoul(const std::string& str, size_t* pos = nullptr, - int base = 10); -int stoi(const std::string& str, size_t* pos = nullptr, int base = 10); -double stod(const std::string& str, size_t* pos = nullptr); -#else -using std::stoul; -using std::stoi; -using std::stod; -#endif - -} // end namespace benchmark - -#endif // BENCHMARK_STRING_UTIL_H_ diff --git a/benchmarks/thirdparty/benchmark/src/sysinfo.cc b/benchmarks/thirdparty/benchmark/src/sysinfo.cc deleted file mode 100755 index 8bab9320f1..0000000000 --- a/benchmarks/thirdparty/benchmark/src/sysinfo.cc +++ /dev/null @@ -1,712 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "internal_macros.h" - -#ifdef BENCHMARK_OS_WINDOWS -#include -#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA -#include -#include -#include -#else -#include -#ifndef BENCHMARK_OS_FUCHSIA -#include -#endif -#include -#include // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD -#include -#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \ - defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD -#define BENCHMARK_HAS_SYSCTL -#include -#endif -#endif -#if defined(BENCHMARK_OS_SOLARIS) -#include -#endif -#if defined(BENCHMARK_OS_QNX) -#include -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "check.h" -#include "cycleclock.h" -#include "internal_macros.h" -#include "log.h" -#include "sleep.h" -#include "string_util.h" - -namespace benchmark { -namespace { - -void PrintImp(std::ostream& out) { out << std::endl; } - -template -void PrintImp(std::ostream& out, First&& f, Rest&&... rest) { - out << std::forward(f); - PrintImp(out, std::forward(rest)...); -} - -template -BENCHMARK_NORETURN void PrintErrorAndDie(Args&&... args) { - PrintImp(std::cerr, std::forward(args)...); - std::exit(EXIT_FAILURE); -} - -#ifdef BENCHMARK_HAS_SYSCTL - -/// ValueUnion - A type used to correctly alias the byte-for-byte output of -/// `sysctl` with the result type it's to be interpreted as. -struct ValueUnion { - union DataT { - uint32_t uint32_value; - uint64_t uint64_value; - // For correct aliasing of union members from bytes. - char bytes[8]; - }; - using DataPtr = std::unique_ptr; - - // The size of the data union member + its trailing array size. 
- size_t Size; - DataPtr Buff; - - public: - ValueUnion() : Size(0), Buff(nullptr, &std::free) {} - - explicit ValueUnion(size_t BuffSize) - : Size(sizeof(DataT) + BuffSize), - Buff(::new (std::malloc(Size)) DataT(), &std::free) {} - - ValueUnion(ValueUnion&& other) = default; - - explicit operator bool() const { return bool(Buff); } - - char* data() const { return Buff->bytes; } - - std::string GetAsString() const { return std::string(data()); } - - int64_t GetAsInteger() const { - if (Size == sizeof(Buff->uint32_value)) - return static_cast(Buff->uint32_value); - else if (Size == sizeof(Buff->uint64_value)) - return static_cast(Buff->uint64_value); - BENCHMARK_UNREACHABLE(); - } - - uint64_t GetAsUnsigned() const { - if (Size == sizeof(Buff->uint32_value)) - return Buff->uint32_value; - else if (Size == sizeof(Buff->uint64_value)) - return Buff->uint64_value; - BENCHMARK_UNREACHABLE(); - } - - template - std::array GetAsArray() { - const int ArrSize = sizeof(T) * N; - CHECK_LE(ArrSize, Size); - std::array Arr; - std::memcpy(Arr.data(), data(), ArrSize); - return Arr; - } -}; - -ValueUnion GetSysctlImp(std::string const& Name) { -#if defined BENCHMARK_OS_OPENBSD - int mib[2]; - - mib[0] = CTL_HW; - if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed")){ - ValueUnion buff(sizeof(int)); - - if (Name == "hw.ncpu") { - mib[1] = HW_NCPU; - } else { - mib[1] = HW_CPUSPEED; - } - - if (sysctl(mib, 2, buff.data(), &buff.Size, nullptr, 0) == -1) { - return ValueUnion(); - } - return buff; - } - return ValueUnion(); -#else - size_t CurBuffSize = 0; - if (sysctlbyname(Name.c_str(), nullptr, &CurBuffSize, nullptr, 0) == -1) - return ValueUnion(); - - ValueUnion buff(CurBuffSize); - if (sysctlbyname(Name.c_str(), buff.data(), &buff.Size, nullptr, 0) == 0) - return buff; - return ValueUnion(); -#endif -} - -BENCHMARK_MAYBE_UNUSED -bool GetSysctl(std::string const& Name, std::string* Out) { - Out->clear(); - auto Buff = GetSysctlImp(Name); - if (!Buff) return false; - Out->assign(Buff.data()); - return true; -} - -template ::value>::type> -bool GetSysctl(std::string const& Name, Tp* Out) { - *Out = 0; - auto Buff = GetSysctlImp(Name); - if (!Buff) return false; - *Out = static_cast(Buff.GetAsUnsigned()); - return true; -} - -template -bool GetSysctl(std::string const& Name, std::array* Out) { - auto Buff = GetSysctlImp(Name); - if (!Buff) return false; - *Out = Buff.GetAsArray(); - return true; -} -#endif - -template -bool ReadFromFile(std::string const& fname, ArgT* arg) { - *arg = ArgT(); - std::ifstream f(fname.c_str()); - if (!f.is_open()) return false; - f >> *arg; - return f.good(); -} - -CPUInfo::Scaling CpuScaling(int num_cpus) { - // We don't have a valid CPU count, so don't even bother. - if (num_cpus <= 0) return CPUInfo::Scaling::UNKNOWN; -#ifdef BENCHMARK_OS_QNX - return CPUInfo::Scaling::UNKNOWN; -#endif -#ifndef BENCHMARK_OS_WINDOWS - // On Linux, the CPUfreq subsystem exposes CPU information as files on the - // local file system. If reading the exported files fails, then we may not be - // running on Linux, so we silently ignore all the read errors. 
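GetSysctlImp above follows the standard two-call sysctlbyname protocol: the first call, given a null output buffer, only reports the required size, and the second call fills a buffer of exactly that size. The same protocol in isolation (SysctlString is a hypothetical helper, hw.machine merely a convenient string key; BSD/macOS only):

#include <sys/types.h>   // must precede sys/sysctl.h, as noted above
#include <sys/sysctl.h>

#include <cstdio>
#include <string>
#include <vector>

// Returns false if the key is missing or either read fails.
bool SysctlString(const char* name, std::string* out) {
  std::size_t size = 0;
  // Call 1: null buffer, so sysctl only writes the required size.
  if (sysctlbyname(name, nullptr, &size, nullptr, 0) != 0) return false;
  std::vector<char> buf(size);
  // Call 2: fill the buffer; size is updated to the bytes actually written.
  if (sysctlbyname(name, buf.data(), &size, nullptr, 0) != 0) return false;
  out->assign(buf.data());  // string keys are NUL-terminated
  return true;
}

int main() {
  std::string machine;
  if (SysctlString("hw.machine", &machine))
    std::printf("hw.machine = %s\n", machine.c_str());
  return 0;
}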
- std::string res; - for (int cpu = 0; cpu < num_cpus; ++cpu) { - std::string governor_file = - StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor"); - if (ReadFromFile(governor_file, &res) && res != "performance") return CPUInfo::Scaling::ENABLED; - } - return CPUInfo::Scaling::DISABLED; -#endif - return CPUInfo::Scaling::UNKNOWN; -} - -int CountSetBitsInCPUMap(std::string Val) { - auto CountBits = [](std::string Part) { - using CPUMask = std::bitset; - Part = "0x" + Part; - CPUMask Mask(benchmark::stoul(Part, nullptr, 16)); - return static_cast(Mask.count()); - }; - size_t Pos; - int total = 0; - while ((Pos = Val.find(',')) != std::string::npos) { - total += CountBits(Val.substr(0, Pos)); - Val = Val.substr(Pos + 1); - } - if (!Val.empty()) { - total += CountBits(Val); - } - return total; -} - -BENCHMARK_MAYBE_UNUSED -std::vector GetCacheSizesFromKVFS() { - std::vector res; - std::string dir = "/sys/devices/system/cpu/cpu0/cache/"; - int Idx = 0; - while (true) { - CPUInfo::CacheInfo info; - std::string FPath = StrCat(dir, "index", Idx++, "/"); - std::ifstream f(StrCat(FPath, "size").c_str()); - if (!f.is_open()) break; - std::string suffix; - f >> info.size; - if (f.fail()) - PrintErrorAndDie("Failed while reading file '", FPath, "size'"); - if (f.good()) { - f >> suffix; - if (f.bad()) - PrintErrorAndDie( - "Invalid cache size format: failed to read size suffix"); - else if (f && suffix != "K") - PrintErrorAndDie("Invalid cache size format: Expected bytes ", suffix); - else if (suffix == "K") - info.size *= 1024; - } - if (!ReadFromFile(StrCat(FPath, "type"), &info.type)) - PrintErrorAndDie("Failed to read from file ", FPath, "type"); - if (!ReadFromFile(StrCat(FPath, "level"), &info.level)) - PrintErrorAndDie("Failed to read from file ", FPath, "level"); - std::string map_str; - if (!ReadFromFile(StrCat(FPath, "shared_cpu_map"), &map_str)) - PrintErrorAndDie("Failed to read from file ", FPath, "shared_cpu_map"); - info.num_sharing = CountSetBitsInCPUMap(map_str); - res.push_back(info); - } - - return res; -} - -#ifdef BENCHMARK_OS_MACOSX -std::vector GetCacheSizesMacOSX() { - std::vector res; - std::array CacheCounts{{0, 0, 0, 0}}; - GetSysctl("hw.cacheconfig", &CacheCounts); - - struct { - std::string name; - std::string type; - int level; - uint64_t num_sharing; - } Cases[] = {{"hw.l1dcachesize", "Data", 1, CacheCounts[1]}, - {"hw.l1icachesize", "Instruction", 1, CacheCounts[1]}, - {"hw.l2cachesize", "Unified", 2, CacheCounts[2]}, - {"hw.l3cachesize", "Unified", 3, CacheCounts[3]}}; - for (auto& C : Cases) { - int val; - if (!GetSysctl(C.name, &val)) continue; - CPUInfo::CacheInfo info; - info.type = C.type; - info.level = C.level; - info.size = val; - info.num_sharing = static_cast(C.num_sharing); - res.push_back(std::move(info)); - } - return res; -} -#elif defined(BENCHMARK_OS_WINDOWS) -std::vector GetCacheSizesWindows() { - std::vector res; - DWORD buffer_size = 0; - using PInfo = SYSTEM_LOGICAL_PROCESSOR_INFORMATION; - using CInfo = CACHE_DESCRIPTOR; - - using UPtr = std::unique_ptr; - GetLogicalProcessorInformation(nullptr, &buffer_size); - UPtr buff((PInfo*)malloc(buffer_size), &std::free); - if (!GetLogicalProcessorInformation(buff.get(), &buffer_size)) - PrintErrorAndDie("Failed during call to GetLogicalProcessorInformation: ", - GetLastError()); - - PInfo* it = buff.get(); - PInfo* end = buff.get() + (buffer_size / sizeof(PInfo)); - - for (; it != end; ++it) { - if (it->Relationship != RelationCache) continue; - using BitSet = std::bitset; - BitSet 
B(it->ProcessorMask); - // To prevent duplicates, only consider caches where CPU 0 is specified - if (!B.test(0)) continue; - CInfo* Cache = &it->Cache; - CPUInfo::CacheInfo C; - C.num_sharing = static_cast(B.count()); - C.level = Cache->Level; - C.size = Cache->Size; - switch (Cache->Type) { - case CacheUnified: - C.type = "Unified"; - break; - case CacheInstruction: - C.type = "Instruction"; - break; - case CacheData: - C.type = "Data"; - break; - case CacheTrace: - C.type = "Trace"; - break; - default: - C.type = "Unknown"; - break; - } - res.push_back(C); - } - return res; -} -#elif BENCHMARK_OS_QNX -std::vector GetCacheSizesQNX() { - std::vector res; - struct cacheattr_entry *cache = SYSPAGE_ENTRY(cacheattr); - uint32_t const elsize = SYSPAGE_ELEMENT_SIZE(cacheattr); - int num = SYSPAGE_ENTRY_SIZE(cacheattr) / elsize ; - for(int i = 0; i < num; ++i ) { - CPUInfo::CacheInfo info; - switch (cache->flags){ - case CACHE_FLAG_INSTR : - info.type = "Instruction"; - info.level = 1; - break; - case CACHE_FLAG_DATA : - info.type = "Data"; - info.level = 1; - break; - case CACHE_FLAG_UNIFIED : - info.type = "Unified"; - info.level = 2; - break; - case CACHE_FLAG_SHARED : - info.type = "Shared"; - info.level = 3; - break; - default : - continue; - break; - } - info.size = cache->line_size * cache->num_lines; - info.num_sharing = 0; - res.push_back(std::move(info)); - cache = SYSPAGE_ARRAY_ADJ_OFFSET(cacheattr, cache, elsize); - } - return res; -} -#endif - -std::vector GetCacheSizes() { -#ifdef BENCHMARK_OS_MACOSX - return GetCacheSizesMacOSX(); -#elif defined(BENCHMARK_OS_WINDOWS) - return GetCacheSizesWindows(); -#elif defined(BENCHMARK_OS_QNX) - return GetCacheSizesQNX(); -#else - return GetCacheSizesFromKVFS(); -#endif -} - -std::string GetSystemName() { -#if defined(BENCHMARK_OS_WINDOWS) - std::string str; - const unsigned COUNT = MAX_COMPUTERNAME_LENGTH+1; - TCHAR hostname[COUNT] = {'\0'}; - DWORD DWCOUNT = COUNT; - if (!GetComputerName(hostname, &DWCOUNT)) - return std::string(""); -#ifndef UNICODE - str = std::string(hostname, DWCOUNT); -#else - //Using wstring_convert, Is deprecated in C++17 - using convert_type = std::codecvt_utf8; - std::wstring_convert converter; - std::wstring wStr(hostname, DWCOUNT); - str = converter.to_bytes(wStr); -#endif - return str; -#else // defined(BENCHMARK_OS_WINDOWS) -#ifndef HOST_NAME_MAX -#ifdef BENCHMARK_HAS_SYSCTL // BSD/Mac Doesnt have HOST_NAME_MAX defined -#define HOST_NAME_MAX 64 -#elif defined(BENCHMARK_OS_NACL) -#define HOST_NAME_MAX 64 -#elif defined(BENCHMARK_OS_QNX) -#define HOST_NAME_MAX 154 -#elif defined(BENCHMARK_OS_RTEMS) -#define HOST_NAME_MAX 256 -#else -#warning "HOST_NAME_MAX not defined. using 64" -#define HOST_NAME_MAX 64 -#endif -#endif // def HOST_NAME_MAX - char hostname[HOST_NAME_MAX]; - int retVal = gethostname(hostname, HOST_NAME_MAX); - if (retVal != 0) return std::string(""); - return std::string(hostname); -#endif // Catch-all POSIX block. -} - -int GetNumCPUs() { -#ifdef BENCHMARK_HAS_SYSCTL - int NumCPU = -1; - if (GetSysctl("hw.ncpu", &NumCPU)) return NumCPU; - fprintf(stderr, "Err: %s\n", strerror(errno)); - std::exit(EXIT_FAILURE); -#elif defined(BENCHMARK_OS_WINDOWS) - SYSTEM_INFO sysinfo; - // Use memset as opposed to = {} to avoid GCC missing initializer false - // positives. 
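The shared_cpu_map values fed to CountSetBitsInCPUMap earlier in this file arrive as comma-separated 32-bit hexadecimal words, for example "ff,00000003"; the number of CPUs sharing a cache is simply the total count of set bits. A standalone worked example of that loop (CountSetBits is a sketch, not the library's exact code):

#include <bitset>
#include <cassert>
#include <string>

int CountSetBits(std::string val) {
  int total = 0;
  std::string::size_type pos;
  while ((pos = val.find(',')) != std::string::npos) {
    total += static_cast<int>(
        std::bitset<32>(std::stoul(val.substr(0, pos), nullptr, 16)).count());
    val = val.substr(pos + 1);
  }
  if (!val.empty())
    total += static_cast<int>(
        std::bitset<32>(std::stoul(val, nullptr, 16)).count());
  return total;
}

int main() {
  // Two 32-bit words: 0xff contributes 8 set bits, 0x3 contributes 2,
  // so ten logical CPUs share this cache.
  assert(CountSetBits("ff,00000003") == 10);
  return 0;
}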
- std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO)); - GetSystemInfo(&sysinfo); - return sysinfo.dwNumberOfProcessors; // number of logical - // processors in the current - // group -#elif defined(BENCHMARK_OS_SOLARIS) - // Returns -1 in case of a failure. - int NumCPU = sysconf(_SC_NPROCESSORS_ONLN); - if (NumCPU < 0) { - fprintf(stderr, - "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n", - strerror(errno)); - } - return NumCPU; -#elif defined(BENCHMARK_OS_QNX) - return static_cast(_syspage_ptr->num_cpu); -#else - int NumCPUs = 0; - int MaxID = -1; - std::ifstream f("/proc/cpuinfo"); - if (!f.is_open()) { - std::cerr << "failed to open /proc/cpuinfo\n"; - return -1; - } - const std::string Key = "processor"; - std::string ln; - while (std::getline(f, ln)) { - if (ln.empty()) continue; - size_t SplitIdx = ln.find(':'); - std::string value; -#if defined(__s390__) - // s390 has another format in /proc/cpuinfo - // it needs to be parsed differently - if (SplitIdx != std::string::npos) value = ln.substr(Key.size()+1,SplitIdx-Key.size()-1); -#else - if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1); -#endif - if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) { - NumCPUs++; - if (!value.empty()) { - int CurID = benchmark::stoi(value); - MaxID = std::max(CurID, MaxID); - } - } - } - if (f.bad()) { - std::cerr << "Failure reading /proc/cpuinfo\n"; - return -1; - } - if (!f.eof()) { - std::cerr << "Failed to read to end of /proc/cpuinfo\n"; - return -1; - } - f.close(); - - if ((MaxID + 1) != NumCPUs) { - fprintf(stderr, - "CPU ID assignments in /proc/cpuinfo seem messed up." - " This is usually caused by a bad BIOS.\n"); - } - return NumCPUs; -#endif - BENCHMARK_UNREACHABLE(); -} - -double GetCPUCyclesPerSecond() { -#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN - long freq; - - // If the kernel is exporting the tsc frequency use that. There are issues - // where cpuinfo_max_freq cannot be relied on because the BIOS may be - // exporintg an invalid p-state (on x86) or p-states may be used to put the - // processor in a new mode (turbo mode). Essentially, those frequencies - // cannot always be relied upon. The same reasons apply to /proc/cpuinfo as - // well. - if (ReadFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq) - // If CPU scaling is in effect, we want to use the *maximum* frequency, - // not whatever CPU speed some random processor happens to be using now. - || ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", - &freq)) { - // The value is in kHz (as the file name suggests). For example, on a - // 2GHz warpstation, the file contains the value "2000000". - return freq * 1000.0; - } - - const double error_value = -1; - double bogo_clock = error_value; - - std::ifstream f("/proc/cpuinfo"); - if (!f.is_open()) { - std::cerr << "failed to open /proc/cpuinfo\n"; - return error_value; - } - - auto startsWithKey = [](std::string const& Value, std::string const& Key) { - if (Key.size() > Value.size()) return false; - auto Cmp = [&](char X, char Y) { - return std::tolower(X) == std::tolower(Y); - }; - return std::equal(Key.begin(), Key.end(), Value.begin(), Cmp); - }; - - std::string ln; - while (std::getline(f, ln)) { - if (ln.empty()) continue; - size_t SplitIdx = ln.find(':'); - std::string value; - if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1); - // When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only - // accept positive values. 
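Concretely, a /proc/cpuinfo line such as "cpu MHz : 2400.000" splits at the colon into the value " 2400.000", and stod times 10^6 gives 2.4e9 cycles per second. A tiny check of that arithmetic:

#include <cassert>
#include <cmath>
#include <string>

int main() {
  const std::string line = "cpu MHz\t\t: 2400.000";
  // Everything after the colon is the value; stod skips the leading space.
  const std::string value = line.substr(line.find(':') + 1);
  const double cycles_per_second = std::stod(value) * 1000000.0;
  assert(std::fabs(cycles_per_second - 2.4e9) < 1.0);
  // A machine reporting "0.000" would yield 0 here and be rejected by the
  // positivity check, falling through to bogomips or the sleep estimate.
  return 0;
}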
Some environments (virtual machines) report zero, - // which would cause infinite looping in WallTime_Init. - if (startsWithKey(ln, "cpu MHz")) { - if (!value.empty()) { - double cycles_per_second = benchmark::stod(value) * 1000000.0; - if (cycles_per_second > 0) return cycles_per_second; - } - } else if (startsWithKey(ln, "bogomips")) { - if (!value.empty()) { - bogo_clock = benchmark::stod(value) * 1000000.0; - if (bogo_clock < 0.0) bogo_clock = error_value; - } - } - } - if (f.bad()) { - std::cerr << "Failure reading /proc/cpuinfo\n"; - return error_value; - } - if (!f.eof()) { - std::cerr << "Failed to read to end of /proc/cpuinfo\n"; - return error_value; - } - f.close(); - // If we found the bogomips clock, but nothing better, we'll use it (but - // we're not happy about it); otherwise, fallback to the rough estimation - // below. - if (bogo_clock >= 0.0) return bogo_clock; - -#elif defined BENCHMARK_HAS_SYSCTL - constexpr auto* FreqStr = -#if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD) - "machdep.tsc_freq"; -#elif defined BENCHMARK_OS_OPENBSD - "hw.cpuspeed"; -#else - "hw.cpufrequency"; -#endif - unsigned long long hz = 0; -#if defined BENCHMARK_OS_OPENBSD - if (GetSysctl(FreqStr, &hz)) return hz * 1000000; -#else - if (GetSysctl(FreqStr, &hz)) return hz; -#endif - fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n", - FreqStr, strerror(errno)); - -#elif defined BENCHMARK_OS_WINDOWS - // In NT, read MHz from the registry. If we fail to do so or we're in win9x - // then make a crude estimate. - DWORD data, data_size = sizeof(data); - if (IsWindowsXPOrGreater() && - SUCCEEDED( - SHGetValueA(HKEY_LOCAL_MACHINE, - "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", - "~MHz", nullptr, &data, &data_size))) - return static_cast((int64_t)data * - (int64_t)(1000 * 1000)); // was mhz -#elif defined (BENCHMARK_OS_SOLARIS) - kstat_ctl_t *kc = kstat_open(); - if (!kc) { - std::cerr << "failed to open /dev/kstat\n"; - return -1; - } - kstat_t *ksp = kstat_lookup(kc, (char*)"cpu_info", -1, (char*)"cpu_info0"); - if (!ksp) { - std::cerr << "failed to lookup in /dev/kstat\n"; - return -1; - } - if (kstat_read(kc, ksp, NULL) < 0) { - std::cerr << "failed to read from /dev/kstat\n"; - return -1; - } - kstat_named_t *knp = - (kstat_named_t*)kstat_data_lookup(ksp, (char*)"current_clock_Hz"); - if (!knp) { - std::cerr << "failed to lookup data in /dev/kstat\n"; - return -1; - } - if (knp->data_type != KSTAT_DATA_UINT64) { - std::cerr << "current_clock_Hz is of unexpected data type: " - << knp->data_type << "\n"; - return -1; - } - double clock_hz = knp->value.ui64; - kstat_close(kc); - return clock_hz; -#elif defined (BENCHMARK_OS_QNX) - return static_cast((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) * - (int64_t)(1000 * 1000)); -#endif - // If we've fallen through, attempt to roughly estimate the CPU clock rate. 
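The last-resort estimate that follows works because the sleep lasts exactly one second, making the elapsed tick count numerically equal to ticks per second. A generalized sketch (hypothetical names; std::chrono stands in for the library's cycleclock::Now(), which reads the TSC or a platform equivalent, and an arbitrary sleep merely adds a scale factor):

#include <chrono>
#include <cstdint>
#include <cstdio>
#include <thread>

// Stand-in tick source for this sketch only.
uint64_t ReadTicks() {
  return static_cast<uint64_t>(
      std::chrono::steady_clock::now().time_since_epoch().count());
}

double EstimateTicksPerSecond(int sleep_ms) {
  const uint64_t start = ReadTicks();
  std::this_thread::sleep_for(std::chrono::milliseconds(sleep_ms));
  const uint64_t elapsed = ReadTicks() - start;
  // Scale ticks-per-sleep up to ticks-per-second; with sleep_ms == 1000,
  // as in the code below, the scale factor is exactly 1.
  return static_cast<double>(elapsed) * 1000.0 / sleep_ms;
}

int main() {
  std::printf("~%.0f ticks/s\n", EstimateTicksPerSecond(100));
  return 0;
}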
- const int estimate_time_ms = 1000; - const auto start_ticks = cycleclock::Now(); - SleepForMilliseconds(estimate_time_ms); - return static_cast(cycleclock::Now() - start_ticks); -} - -std::vector GetLoadAvg() { -#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \ - defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \ - defined BENCHMARK_OS_OPENBSD) && !defined(__ANDROID__) - constexpr int kMaxSamples = 3; - std::vector res(kMaxSamples, 0.0); - const int nelem = getloadavg(res.data(), kMaxSamples); - if (nelem < 1) { - res.clear(); - } else { - res.resize(nelem); - } - return res; -#else - return {}; -#endif -} - -} // end namespace - -const CPUInfo& CPUInfo::Get() { - static const CPUInfo* info = new CPUInfo(); - return *info; -} - -CPUInfo::CPUInfo() - : num_cpus(GetNumCPUs()), - cycles_per_second(GetCPUCyclesPerSecond()), - caches(GetCacheSizes()), - scaling(CpuScaling(num_cpus)), - load_avg(GetLoadAvg()) {} - - -const SystemInfo& SystemInfo::Get() { - static const SystemInfo* info = new SystemInfo(); - return *info; -} - -SystemInfo::SystemInfo() : name(GetSystemName()) {} -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/thread_manager.h b/benchmarks/thirdparty/benchmark/src/thread_manager.h deleted file mode 100755 index 28e2dd53af..0000000000 --- a/benchmarks/thirdparty/benchmark/src/thread_manager.h +++ /dev/null @@ -1,64 +0,0 @@ -#ifndef BENCHMARK_THREAD_MANAGER_H -#define BENCHMARK_THREAD_MANAGER_H - -#include - -#include "benchmark/benchmark.h" -#include "mutex.h" - -namespace benchmark { -namespace internal { - -class ThreadManager { - public: - explicit ThreadManager(int num_threads) - : alive_threads_(num_threads), start_stop_barrier_(num_threads) {} - - Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) { - return benchmark_mutex_; - } - - bool StartStopBarrier() EXCLUDES(end_cond_mutex_) { - return start_stop_barrier_.wait(); - } - - void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) { - start_stop_barrier_.removeThread(); - if (--alive_threads_ == 0) { - MutexLock lock(end_cond_mutex_); - end_condition_.notify_all(); - } - } - - void WaitForAllThreads() EXCLUDES(end_cond_mutex_) { - MutexLock lock(end_cond_mutex_); - end_condition_.wait(lock.native_handle(), - [this]() { return alive_threads_ == 0; }); - } - - public: - struct Result { - IterationCount iterations = 0; - double real_time_used = 0; - double cpu_time_used = 0; - double manual_time_used = 0; - int64_t complexity_n = 0; - std::string report_label_; - std::string error_message_; - bool has_error_ = false; - UserCounters counters; - }; - GUARDED_BY(GetBenchmarkMutex()) Result results; - - private: - mutable Mutex benchmark_mutex_; - std::atomic alive_threads_; - Barrier start_stop_barrier_; - Mutex end_cond_mutex_; - Condition end_condition_; -}; - -} // namespace internal -} // namespace benchmark - -#endif // BENCHMARK_THREAD_MANAGER_H diff --git a/benchmarks/thirdparty/benchmark/src/thread_timer.h b/benchmarks/thirdparty/benchmark/src/thread_timer.h deleted file mode 100755 index 1703ca0d6f..0000000000 --- a/benchmarks/thirdparty/benchmark/src/thread_timer.h +++ /dev/null @@ -1,86 +0,0 @@ -#ifndef BENCHMARK_THREAD_TIMER_H -#define BENCHMARK_THREAD_TIMER_H - -#include "check.h" -#include "timers.h" - -namespace benchmark { -namespace internal { - -class ThreadTimer { - explicit ThreadTimer(bool measure_process_cpu_time_) - : measure_process_cpu_time(measure_process_cpu_time_) {} - - public: - static ThreadTimer Create() { - return 
ThreadTimer(/*measure_process_cpu_time_=*/false); - } - static ThreadTimer CreateProcessCpuTime() { - return ThreadTimer(/*measure_process_cpu_time_=*/true); - } - - // Called by each thread - void StartTimer() { - running_ = true; - start_real_time_ = ChronoClockNow(); - start_cpu_time_ = ReadCpuTimerOfChoice(); - } - - // Called by each thread - void StopTimer() { - CHECK(running_); - running_ = false; - real_time_used_ += ChronoClockNow() - start_real_time_; - // Floating point error can result in the subtraction producing a negative - // time. Guard against that. - cpu_time_used_ += - std::max(ReadCpuTimerOfChoice() - start_cpu_time_, 0); - } - - // Called by each thread - void SetIterationTime(double seconds) { manual_time_used_ += seconds; } - - bool running() const { return running_; } - - // REQUIRES: timer is not running - double real_time_used() const { - CHECK(!running_); - return real_time_used_; - } - - // REQUIRES: timer is not running - double cpu_time_used() const { - CHECK(!running_); - return cpu_time_used_; - } - - // REQUIRES: timer is not running - double manual_time_used() const { - CHECK(!running_); - return manual_time_used_; - } - - private: - double ReadCpuTimerOfChoice() const { - if (measure_process_cpu_time) return ProcessCPUUsage(); - return ThreadCPUUsage(); - } - - // should the thread, or the process, time be measured? - const bool measure_process_cpu_time; - - bool running_ = false; // Is the timer running - double start_real_time_ = 0; // If running_ - double start_cpu_time_ = 0; // If running_ - - // Accumulated time so far (does not contain current slice if running_) - double real_time_used_ = 0; - double cpu_time_used_ = 0; - // Manually set iteration time. User sets this with SetIterationTime(seconds). - double manual_time_used_ = 0; -}; - -} // namespace internal -} // namespace benchmark - -#endif // BENCHMARK_THREAD_TIMER_H diff --git a/benchmarks/thirdparty/benchmark/src/timers.cc b/benchmarks/thirdparty/benchmark/src/timers.cc deleted file mode 100755 index 4f76eddc1d..0000000000 --- a/benchmarks/thirdparty/benchmark/src/timers.cc +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
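The ThreadTimer just removed is driven in a strict start/stop cycle per measurement slice, and its accessors may only be read while stopped (the CHECKs enforce this). A condensed model of its accumulate-on-stop core (MiniTimer is a hypothetical name, and std::chrono stands in for the CPU-time readers):

#include <algorithm>
#include <cassert>
#include <chrono>

class MiniTimer {
 public:
  void Start() { running_ = true; start_ = Now(); }
  void Stop() {
    assert(running_);
    running_ = false;
    // Mirror the std::max guard above: clock imprecision must never
    // contribute a negative slice to the accumulated total.
    elapsed_ += std::max(Now() - start_, 0.0);
  }
  double elapsed() const { assert(!running_); return elapsed_; }

 private:
  static double Now() {
    return std::chrono::duration<double>(
               std::chrono::steady_clock::now().time_since_epoch()).count();
  }
  bool running_ = false;
  double start_ = 0;
  double elapsed_ = 0;
};

int main() {
  MiniTimer t;
  t.Start();
  // ... one measured slice of work would run here ...
  t.Stop();
  return t.elapsed() >= 0.0 ? 0 : 1;
}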
- -#include "timers.h" -#include "internal_macros.h" - -#ifdef BENCHMARK_OS_WINDOWS -#include -#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA -#include -#include -#else -#include -#ifndef BENCHMARK_OS_FUCHSIA -#include -#endif -#include -#include // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD -#include -#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX -#include -#endif -#if defined(BENCHMARK_OS_MACOSX) -#include -#include -#include -#endif -#endif - -#ifdef BENCHMARK_OS_EMSCRIPTEN -#include -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "check.h" -#include "log.h" -#include "sleep.h" -#include "string_util.h" - -namespace benchmark { - -// Suppress unused warnings on helper functions. -#if defined(__GNUC__) -#pragma GCC diagnostic ignored "-Wunused-function" -#endif - -namespace { -#if defined(BENCHMARK_OS_WINDOWS) -double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) { - ULARGE_INTEGER kernel; - ULARGE_INTEGER user; - kernel.HighPart = kernel_time.dwHighDateTime; - kernel.LowPart = kernel_time.dwLowDateTime; - user.HighPart = user_time.dwHighDateTime; - user.LowPart = user_time.dwLowDateTime; - return (static_cast(kernel.QuadPart) + - static_cast(user.QuadPart)) * - 1e-7; -} -#elif !defined(BENCHMARK_OS_FUCHSIA) -double MakeTime(struct rusage const& ru) { - return (static_cast(ru.ru_utime.tv_sec) + - static_cast(ru.ru_utime.tv_usec) * 1e-6 + - static_cast(ru.ru_stime.tv_sec) + - static_cast(ru.ru_stime.tv_usec) * 1e-6); -} -#endif -#if defined(BENCHMARK_OS_MACOSX) -double MakeTime(thread_basic_info_data_t const& info) { - return (static_cast(info.user_time.seconds) + - static_cast(info.user_time.microseconds) * 1e-6 + - static_cast(info.system_time.seconds) + - static_cast(info.system_time.microseconds) * 1e-6); -} -#endif -#if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID) -double MakeTime(struct timespec const& ts) { - return ts.tv_sec + (static_cast(ts.tv_nsec) * 1e-9); -} -#endif - -BENCHMARK_NORETURN static void DiagnoseAndExit(const char* msg) { - std::cerr << "ERROR: " << msg << std::endl; - std::exit(EXIT_FAILURE); -} - -} // end namespace - -double ProcessCPUUsage() { -#if defined(BENCHMARK_OS_WINDOWS) - HANDLE proc = GetCurrentProcess(); - FILETIME creation_time; - FILETIME exit_time; - FILETIME kernel_time; - FILETIME user_time; - if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time, - &user_time)) - return MakeTime(kernel_time, user_time); - DiagnoseAndExit("GetProccessTimes() failed"); -#elif defined(BENCHMARK_OS_EMSCRIPTEN) - // clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten. - // Use Emscripten-specific API. Reported CPU time would be exactly the - // same as total time, but this is ok because there aren't long-latency - // syncronous system calls in Emscripten. - return emscripten_get_now() * 1e-3; -#elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX) - // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. See - // https://github.com/google/benchmark/pull/292 - struct timespec spec; - if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0) - return MakeTime(spec); - DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed"); -#else - struct rusage ru; - if (getrusage(RUSAGE_SELF, &ru) == 0) return MakeTime(ru); - DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) 
failed"); -#endif -} - -double ThreadCPUUsage() { -#if defined(BENCHMARK_OS_WINDOWS) - HANDLE this_thread = GetCurrentThread(); - FILETIME creation_time; - FILETIME exit_time; - FILETIME kernel_time; - FILETIME user_time; - GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time, - &user_time); - return MakeTime(kernel_time, user_time); -#elif defined(BENCHMARK_OS_MACOSX) - // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. See - // https://github.com/google/benchmark/pull/292 - mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT; - thread_basic_info_data_t info; - mach_port_t thread = pthread_mach_thread_np(pthread_self()); - if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) == - KERN_SUCCESS) { - return MakeTime(info); - } - DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info"); -#elif defined(BENCHMARK_OS_EMSCRIPTEN) - // Emscripten doesn't support traditional threads - return ProcessCPUUsage(); -#elif defined(BENCHMARK_OS_RTEMS) - // RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See - // https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c - return ProcessCPUUsage(); -#elif defined(BENCHMARK_OS_SOLARIS) - struct rusage ru; - if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru); - DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed"); -#elif defined(CLOCK_THREAD_CPUTIME_ID) - struct timespec ts; - if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts); - DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed"); -#else -#error Per-thread timing is not available on your system. -#endif -} - -std::string LocalDateTimeString() { - // Write the local time in RFC3339 format yyyy-mm-ddTHH:MM:SS+/-HH:MM. - typedef std::chrono::system_clock Clock; - std::time_t now = Clock::to_time_t(Clock::now()); - const std::size_t kTzOffsetLen = 6; - const std::size_t kTimestampLen = 19; - - std::size_t tz_len; - std::size_t timestamp_len; - long int offset_minutes; - char tz_offset_sign = '+'; - // Long enough buffers to avoid format-overflow warnings - char tz_offset[128]; - char storage[128]; - -#if defined(BENCHMARK_OS_WINDOWS) - std::tm *timeinfo_p = ::localtime(&now); -#else - std::tm timeinfo; - std::tm *timeinfo_p = &timeinfo; - ::localtime_r(&now, &timeinfo); -#endif - - tz_len = std::strftime(tz_offset, sizeof(tz_offset), "%z", timeinfo_p); - - if (tz_len < kTzOffsetLen && tz_len > 1) { - // Timezone offset was written. strftime writes offset as +HHMM or -HHMM, - // RFC3339 specifies an offset as +HH:MM or -HH:MM. To convert, we parse - // the offset as an integer, then reprint it to a string. - - offset_minutes = ::strtol(tz_offset, NULL, 10); - if (offset_minutes < 0) { - offset_minutes *= -1; - tz_offset_sign = '-'; - } - - tz_len = ::snprintf(tz_offset, sizeof(tz_offset), "%c%02li:%02li", - tz_offset_sign, offset_minutes / 100, offset_minutes % 100); - CHECK(tz_len == kTzOffsetLen); - ((void)tz_len); // Prevent unused variable warning in optimized build. - } else { - // Unknown offset. RFC3339 specifies that unknown local offsets should be - // written as UTC time with -00:00 timezone. -#if defined(BENCHMARK_OS_WINDOWS) - // Potential race condition if another thread calls localtime or gmtime. 
- timeinfo_p = ::gmtime(&now); -#else - ::gmtime_r(&now, &timeinfo); -#endif - - strncpy(tz_offset, "-00:00", kTzOffsetLen + 1); - } - - timestamp_len = std::strftime(storage, sizeof(storage), "%Y-%m-%dT%H:%M:%S", - timeinfo_p); - CHECK(timestamp_len == kTimestampLen); - // Prevent unused variable warning in optimized build. - ((void)kTimestampLen); - - std::strncat(storage, tz_offset, sizeof(storage) - timestamp_len - 1); - return std::string(storage); -} - -} // end namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/src/timers.h b/benchmarks/thirdparty/benchmark/src/timers.h deleted file mode 100755 index 65606ccd93..0000000000 --- a/benchmarks/thirdparty/benchmark/src/timers.h +++ /dev/null @@ -1,48 +0,0 @@ -#ifndef BENCHMARK_TIMERS_H -#define BENCHMARK_TIMERS_H - -#include -#include - -namespace benchmark { - -// Return the CPU usage of the current process -double ProcessCPUUsage(); - -// Return the CPU usage of the children of the current process -double ChildrenCPUUsage(); - -// Return the CPU usage of the current thread -double ThreadCPUUsage(); - -#if defined(HAVE_STEADY_CLOCK) -template -struct ChooseSteadyClock { - typedef std::chrono::high_resolution_clock type; -}; - -template <> -struct ChooseSteadyClock { - typedef std::chrono::steady_clock type; -}; -#endif - -struct ChooseClockType { -#if defined(HAVE_STEADY_CLOCK) - typedef ChooseSteadyClock<>::type type; -#else - typedef std::chrono::high_resolution_clock type; -#endif -}; - -inline double ChronoClockNow() { - typedef ChooseClockType::type ClockType; - using FpSeconds = std::chrono::duration; - return FpSeconds(ClockType::now().time_since_epoch()).count(); -} - -std::string LocalDateTimeString(); - -} // end namespace benchmark - -#endif // BENCHMARK_TIMERS_H diff --git a/benchmarks/thirdparty/benchmark/test/AssemblyTests.cmake b/benchmarks/thirdparty/benchmark/test/AssemblyTests.cmake deleted file mode 100755 index 3d078586f1..0000000000 --- a/benchmarks/thirdparty/benchmark/test/AssemblyTests.cmake +++ /dev/null @@ -1,46 +0,0 @@ - -include(split_list) - -set(ASM_TEST_FLAGS "") -check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG) -if (BENCHMARK_HAS_O3_FLAG) - list(APPEND ASM_TEST_FLAGS -O3) -endif() - -check_cxx_compiler_flag(-g0 BENCHMARK_HAS_G0_FLAG) -if (BENCHMARK_HAS_G0_FLAG) - list(APPEND ASM_TEST_FLAGS -g0) -endif() - -check_cxx_compiler_flag(-fno-stack-protector BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG) -if (BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG) - list(APPEND ASM_TEST_FLAGS -fno-stack-protector) -endif() - -split_list(ASM_TEST_FLAGS) -string(TOUPPER "${CMAKE_CXX_COMPILER_ID}" ASM_TEST_COMPILER) - -macro(add_filecheck_test name) - cmake_parse_arguments(ARG "" "" "CHECK_PREFIXES" ${ARGV}) - add_library(${name} OBJECT ${name}.cc) - set_target_properties(${name} PROPERTIES COMPILE_FLAGS "-S ${ASM_TEST_FLAGS}") - set(ASM_OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/${name}.s") - add_custom_target(copy_${name} ALL - COMMAND ${PROJECT_SOURCE_DIR}/tools/strip_asm.py - $ - ${ASM_OUTPUT_FILE} - BYPRODUCTS ${ASM_OUTPUT_FILE}) - add_dependencies(copy_${name} ${name}) - if (NOT ARG_CHECK_PREFIXES) - set(ARG_CHECK_PREFIXES "CHECK") - endif() - foreach(prefix ${ARG_CHECK_PREFIXES}) - add_test(NAME run_${name}_${prefix} - COMMAND - ${LLVM_FILECHECK_EXE} ${name}.cc - --input-file=${ASM_OUTPUT_FILE} - --check-prefixes=CHECK,CHECK-${ASM_TEST_COMPILER} - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) - endforeach() -endmacro() - diff --git a/benchmarks/thirdparty/benchmark/test/CMakeLists.txt 
b/benchmarks/thirdparty/benchmark/test/CMakeLists.txt deleted file mode 100755 index c1a3a3fc19..0000000000 --- a/benchmarks/thirdparty/benchmark/test/CMakeLists.txt +++ /dev/null @@ -1,263 +0,0 @@ -# Enable the tests - -find_package(Threads REQUIRED) -include(CheckCXXCompilerFlag) - -# NOTE: Some tests use `` to perform the test. Therefore we must -# strip -DNDEBUG from the default CMake flags in DEBUG mode. -string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE) -if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" ) - add_definitions( -UNDEBUG ) - add_definitions(-DTEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) - # Also remove /D NDEBUG to avoid MSVC warnings about conflicting defines. - foreach (flags_var_to_scrub - CMAKE_CXX_FLAGS_RELEASE - CMAKE_CXX_FLAGS_RELWITHDEBINFO - CMAKE_CXX_FLAGS_MINSIZEREL - CMAKE_C_FLAGS_RELEASE - CMAKE_C_FLAGS_RELWITHDEBINFO - CMAKE_C_FLAGS_MINSIZEREL) - string (REGEX REPLACE "(^| )[/-]D *NDEBUG($| )" " " - "${flags_var_to_scrub}" "${${flags_var_to_scrub}}") - endforeach() -endif() - -check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG) -set(BENCHMARK_O3_FLAG "") -if (BENCHMARK_HAS_O3_FLAG) - set(BENCHMARK_O3_FLAG "-O3") -endif() - -# NOTE: These flags must be added after find_package(Threads REQUIRED) otherwise -# they will break the configuration check. -if (DEFINED BENCHMARK_CXX_LINKER_FLAGS) - list(APPEND CMAKE_EXE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) -endif() - -add_library(output_test_helper STATIC output_test_helper.cc output_test.h) - -macro(compile_benchmark_test name) - add_executable(${name} "${name}.cc") - target_link_libraries(${name} benchmark::benchmark ${CMAKE_THREAD_LIBS_INIT}) -endmacro(compile_benchmark_test) - -macro(compile_benchmark_test_with_main name) - add_executable(${name} "${name}.cc") - target_link_libraries(${name} benchmark::benchmark_main) -endmacro(compile_benchmark_test_with_main) - -macro(compile_output_test name) - add_executable(${name} "${name}.cc" output_test.h) - target_link_libraries(${name} output_test_helper benchmark::benchmark - ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) -endmacro(compile_output_test) - -# Demonstration executable -compile_benchmark_test(benchmark_test) -add_test(NAME benchmark COMMAND benchmark_test --benchmark_min_time=0.01) - -compile_benchmark_test(filter_test) -macro(add_filter_test name filter expect) - add_test(NAME ${name} COMMAND filter_test --benchmark_min_time=0.01 --benchmark_filter=${filter} ${expect}) - add_test(NAME ${name}_list_only COMMAND filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect}) -endmacro(add_filter_test) - -add_filter_test(filter_simple "Foo" 3) -add_filter_test(filter_simple_negative "-Foo" 2) -add_filter_test(filter_suffix "BM_.*" 4) -add_filter_test(filter_suffix_negative "-BM_.*" 1) -add_filter_test(filter_regex_all ".*" 5) -add_filter_test(filter_regex_all_negative "-.*" 0) -add_filter_test(filter_regex_blank "" 5) -add_filter_test(filter_regex_blank_negative "-" 0) -add_filter_test(filter_regex_none "monkey" 0) -add_filter_test(filter_regex_none_negative "-monkey" 5) -add_filter_test(filter_regex_wildcard ".*Foo.*" 3) -add_filter_test(filter_regex_wildcard_negative "-.*Foo.*" 2) -add_filter_test(filter_regex_begin "^BM_.*" 4) -add_filter_test(filter_regex_begin_negative "-^BM_.*" 1) -add_filter_test(filter_regex_begin2 "^N" 1) -add_filter_test(filter_regex_begin2_negative "-^N" 4) -add_filter_test(filter_regex_end ".*Ba$" 1) -add_filter_test(filter_regex_end_negative "-.*Ba$" 4) - -compile_benchmark_test(options_test) 
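The -UNDEBUG scrubbing at the top of this file exists because assert compiles away entirely whenever NDEBUG is defined, which would hollow out every assertion-based test in the stock Release configurations. The whole hazard fits in a few lines of C++:

#include <cassert>

int main() {
  // Under -DNDEBUG this statement expands to nothing, so a test built with
  // unscrubbed Release flags would pass vacuously; hence the flag scrubbing.
  assert(2 + 2 == 4);
  return 0;
}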
-add_test(NAME options_benchmarks COMMAND options_test --benchmark_min_time=0.01) - -compile_benchmark_test(basic_test) -add_test(NAME basic_benchmark COMMAND basic_test --benchmark_min_time=0.01) - -compile_benchmark_test(diagnostics_test) -add_test(NAME diagnostics_test COMMAND diagnostics_test --benchmark_min_time=0.01) - -compile_benchmark_test(skip_with_error_test) -add_test(NAME skip_with_error_test COMMAND skip_with_error_test --benchmark_min_time=0.01) - -compile_benchmark_test(donotoptimize_test) -# Some of the issues with DoNotOptimize only occur when optimization is enabled -check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG) -if (BENCHMARK_HAS_O3_FLAG) - set_target_properties(donotoptimize_test PROPERTIES COMPILE_FLAGS "-O3") -endif() -add_test(NAME donotoptimize_test COMMAND donotoptimize_test --benchmark_min_time=0.01) - -compile_benchmark_test(fixture_test) -add_test(NAME fixture_test COMMAND fixture_test --benchmark_min_time=0.01) - -compile_benchmark_test(register_benchmark_test) -add_test(NAME register_benchmark_test COMMAND register_benchmark_test --benchmark_min_time=0.01) - -compile_benchmark_test(map_test) -add_test(NAME map_test COMMAND map_test --benchmark_min_time=0.01) - -compile_benchmark_test(multiple_ranges_test) -add_test(NAME multiple_ranges_test COMMAND multiple_ranges_test --benchmark_min_time=0.01) - -compile_benchmark_test(args_product_test) -add_test(NAME args_product_test COMMAND args_product_test --benchmark_min_time=0.01) - -compile_benchmark_test_with_main(link_main_test) -add_test(NAME link_main_test COMMAND link_main_test --benchmark_min_time=0.01) - -compile_output_test(reporter_output_test) -add_test(NAME reporter_output_test COMMAND reporter_output_test --benchmark_min_time=0.01) - -compile_output_test(templated_fixture_test) -add_test(NAME templated_fixture_test COMMAND templated_fixture_test --benchmark_min_time=0.01) - -compile_output_test(user_counters_test) -add_test(NAME user_counters_test COMMAND user_counters_test --benchmark_min_time=0.01) - -compile_output_test(internal_threading_test) -add_test(NAME internal_threading_test COMMAND internal_threading_test --benchmark_min_time=0.01) - -compile_output_test(report_aggregates_only_test) -add_test(NAME report_aggregates_only_test COMMAND report_aggregates_only_test --benchmark_min_time=0.01) - -compile_output_test(display_aggregates_only_test) -add_test(NAME display_aggregates_only_test COMMAND display_aggregates_only_test --benchmark_min_time=0.01) - -compile_output_test(user_counters_tabular_test) -add_test(NAME user_counters_tabular_test COMMAND user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01) - -compile_output_test(user_counters_thousands_test) -add_test(NAME user_counters_thousands_test COMMAND user_counters_thousands_test --benchmark_min_time=0.01) - -compile_output_test(memory_manager_test) -add_test(NAME memory_manager_test COMMAND memory_manager_test --benchmark_min_time=0.01) - -check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG) -if (BENCHMARK_HAS_CXX03_FLAG) - compile_benchmark_test(cxx03_test) - set_target_properties(cxx03_test - PROPERTIES - CXX_STANDARD 98 - CXX_STANDARD_REQUIRED YES) - # libstdc++ provides different definitions within between dialects. When - # LTO is enabled and -Werror is specified GCC diagnoses this ODR violation - # causing the test to fail to compile. To prevent this we explicitly disable - # the warning. 
- check_cxx_compiler_flag(-Wno-odr BENCHMARK_HAS_WNO_ODR) - if (BENCHMARK_ENABLE_LTO AND BENCHMARK_HAS_WNO_ODR) - set_target_properties(cxx03_test - PROPERTIES - LINK_FLAGS "-Wno-odr") - endif() - add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01) -endif() - -# Attempt to work around flaky test failures when running on Appveyor servers. -if (DEFINED ENV{APPVEYOR}) - set(COMPLEXITY_MIN_TIME "0.5") -else() - set(COMPLEXITY_MIN_TIME "0.01") -endif() -compile_output_test(complexity_test) -add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME}) - -############################################################################### -# GoogleTest Unit Tests -############################################################################### - -if (BENCHMARK_ENABLE_GTEST_TESTS) - macro(compile_gtest name) - add_executable(${name} "${name}.cc") - target_link_libraries(${name} benchmark::benchmark - gmock_main ${CMAKE_THREAD_LIBS_INIT}) - endmacro(compile_gtest) - - macro(add_gtest name) - compile_gtest(${name}) - add_test(NAME ${name} COMMAND ${name}) - endmacro() - - add_gtest(benchmark_gtest) - add_gtest(benchmark_name_gtest) - add_gtest(commandlineflags_gtest) - add_gtest(statistics_gtest) - add_gtest(string_util_gtest) -endif(BENCHMARK_ENABLE_GTEST_TESTS) - -############################################################################### -# Assembly Unit Tests -############################################################################### - -if (BENCHMARK_ENABLE_ASSEMBLY_TESTS) - if (NOT LLVM_FILECHECK_EXE) - message(FATAL_ERROR "LLVM FileCheck is required when including this file") - endif() - include(AssemblyTests.cmake) - add_filecheck_test(donotoptimize_assembly_test) - add_filecheck_test(state_assembly_test) - add_filecheck_test(clobber_memory_assembly_test) -endif() - - - -############################################################################### -# Code Coverage Configuration -############################################################################### - -# Add the coverage command(s) -if(CMAKE_BUILD_TYPE) - string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER) -endif() -if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage") - find_program(GCOV gcov) - find_program(LCOV lcov) - find_program(GENHTML genhtml) - find_program(CTEST ctest) - if (GCOV AND LCOV AND GENHTML AND CTEST AND HAVE_CXX_FLAG_COVERAGE) - add_custom_command( - OUTPUT ${CMAKE_BINARY_DIR}/lcov/index.html - COMMAND ${LCOV} -q -z -d . - COMMAND ${LCOV} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . -o before.lcov -i - COMMAND ${CTEST} --force-new-ctest-process - COMMAND ${LCOV} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . 
-o after.lcov - COMMAND ${LCOV} -q -a before.lcov -a after.lcov --output-file final.lcov - COMMAND ${LCOV} -q -r final.lcov "'${CMAKE_SOURCE_DIR}/test/*'" -o final.lcov - COMMAND ${GENHTML} final.lcov -o lcov --demangle-cpp --sort -p "${CMAKE_BINARY_DIR}" -t benchmark - DEPENDS filter_test benchmark_test options_test basic_test fixture_test cxx03_test complexity_test - WORKING_DIRECTORY ${CMAKE_BINARY_DIR} - COMMENT "Running LCOV" - ) - add_custom_target(coverage - DEPENDS ${CMAKE_BINARY_DIR}/lcov/index.html - COMMENT "LCOV report at lcov/index.html" - ) - message(STATUS "Coverage command added") - else() - if (HAVE_CXX_FLAG_COVERAGE) - set(CXX_FLAG_COVERAGE_MESSAGE supported) - else() - set(CXX_FLAG_COVERAGE_MESSAGE unavailable) - endif() - message(WARNING - "Coverage not available:\n" - " gcov: ${GCOV}\n" - " lcov: ${LCOV}\n" - " genhtml: ${GENHTML}\n" - " ctest: ${CTEST}\n" - " --coverage flag: ${CXX_FLAG_COVERAGE_MESSAGE}") - endif() -endif() diff --git a/benchmarks/thirdparty/benchmark/test/args_product_test.cc b/benchmarks/thirdparty/benchmark/test/args_product_test.cc deleted file mode 100755 index 8a859f8415..0000000000 --- a/benchmarks/thirdparty/benchmark/test/args_product_test.cc +++ /dev/null @@ -1,77 +0,0 @@ -#include "benchmark/benchmark.h" - -#include -#include -#include -#include - -class ArgsProductFixture : public ::benchmark::Fixture { - public: - ArgsProductFixture() - : expectedValues({{0, 100, 2000, 30000}, - {1, 15, 3, 8}, - {1, 15, 3, 9}, - {1, 15, 7, 8}, - {1, 15, 7, 9}, - {1, 15, 10, 8}, - {1, 15, 10, 9}, - {2, 15, 3, 8}, - {2, 15, 3, 9}, - {2, 15, 7, 8}, - {2, 15, 7, 9}, - {2, 15, 10, 8}, - {2, 15, 10, 9}, - {4, 5, 6, 11}}) {} - - void SetUp(const ::benchmark::State& state) { - std::vector ranges = {state.range(0), state.range(1), - state.range(2), state.range(3)}; - - assert(expectedValues.find(ranges) != expectedValues.end()); - - actualValues.insert(ranges); - } - - // NOTE: This is not TearDown as we want to check after _all_ runs are - // complete. 
- virtual ~ArgsProductFixture() { - if (actualValues != expectedValues) { - std::cout << "EXPECTED\n"; - for (auto v : expectedValues) { - std::cout << "{"; - for (int64_t iv : v) { - std::cout << iv << ", "; - } - std::cout << "}\n"; - } - std::cout << "ACTUAL\n"; - for (auto v : actualValues) { - std::cout << "{"; - for (int64_t iv : v) { - std::cout << iv << ", "; - } - std::cout << "}\n"; - } - } - } - - std::set> expectedValues; - std::set> actualValues; -}; - -BENCHMARK_DEFINE_F(ArgsProductFixture, Empty)(benchmark::State& state) { - for (auto _ : state) { - int64_t product = - state.range(0) * state.range(1) * state.range(2) * state.range(3); - for (int64_t x = 0; x < product; x++) { - benchmark::DoNotOptimize(x); - } - } -} - -BENCHMARK_REGISTER_F(ArgsProductFixture, Empty) - ->Args({0, 100, 2000, 30000}) - ->ArgsProduct({{1, 2}, {15}, {3, 7, 10}, {8, 9}}) - ->Args({4, 5, 6, 11}); - -BENCHMARK_MAIN(); diff --git a/benchmarks/thirdparty/benchmark/test/basic_test.cc b/benchmarks/thirdparty/benchmark/test/basic_test.cc deleted file mode 100755 index 5f3dd1a3ee..0000000000 --- a/benchmarks/thirdparty/benchmark/test/basic_test.cc +++ /dev/null @@ -1,136 +0,0 @@ - -#include "benchmark/benchmark.h" - -#define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192) - -void BM_empty(benchmark::State& state) { - for (auto _ : state) { - benchmark::DoNotOptimize(state.iterations()); - } -} -BENCHMARK(BM_empty); -BENCHMARK(BM_empty)->ThreadPerCpu(); - -void BM_spin_empty(benchmark::State& state) { - for (auto _ : state) { - for (int x = 0; x < state.range(0); ++x) { - benchmark::DoNotOptimize(x); - } - } -} -BASIC_BENCHMARK_TEST(BM_spin_empty); -BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu(); - -void BM_spin_pause_before(benchmark::State& state) { - for (int i = 0; i < state.range(0); ++i) { - benchmark::DoNotOptimize(i); - } - for (auto _ : state) { - for (int i = 0; i < state.range(0); ++i) { - benchmark::DoNotOptimize(i); - } - } -} -BASIC_BENCHMARK_TEST(BM_spin_pause_before); -BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu(); - -void BM_spin_pause_during(benchmark::State& state) { - for (auto _ : state) { - state.PauseTiming(); - for (int i = 0; i < state.range(0); ++i) { - benchmark::DoNotOptimize(i); - } - state.ResumeTiming(); - for (int i = 0; i < state.range(0); ++i) { - benchmark::DoNotOptimize(i); - } - } -} -BASIC_BENCHMARK_TEST(BM_spin_pause_during); -BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu(); - -void BM_pause_during(benchmark::State& state) { - for (auto _ : state) { - state.PauseTiming(); - state.ResumeTiming(); - } -} -BENCHMARK(BM_pause_during); -BENCHMARK(BM_pause_during)->ThreadPerCpu(); -BENCHMARK(BM_pause_during)->UseRealTime(); -BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu(); - -void BM_spin_pause_after(benchmark::State& state) { - for (auto _ : state) { - for (int i = 0; i < state.range(0); ++i) { - benchmark::DoNotOptimize(i); - } - } - for (int i = 0; i < state.range(0); ++i) { - benchmark::DoNotOptimize(i); - } -} -BASIC_BENCHMARK_TEST(BM_spin_pause_after); -BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu(); - -void BM_spin_pause_before_and_after(benchmark::State& state) { - for (int i = 0; i < state.range(0); ++i) { - benchmark::DoNotOptimize(i); - } - for (auto _ : state) { - for (int i = 0; i < state.range(0); ++i) { - benchmark::DoNotOptimize(i); - } - } - for (int i = 0; i < state.range(0); ++i) { - benchmark::DoNotOptimize(i); - } -} -BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after); 
-BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();
-
-void BM_empty_stop_start(benchmark::State& state) {
-  for (auto _ : state) {
-  }
-}
-BENCHMARK(BM_empty_stop_start);
-BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();
-
-
-void BM_KeepRunning(benchmark::State& state) {
-  benchmark::IterationCount iter_count = 0;
-  assert(iter_count == state.iterations());
-  while (state.KeepRunning()) {
-    ++iter_count;
-  }
-  assert(iter_count == state.iterations());
-}
-BENCHMARK(BM_KeepRunning);
-
-void BM_KeepRunningBatch(benchmark::State& state) {
-  // Choose a prime batch size to avoid evenly dividing max_iterations.
-  const benchmark::IterationCount batch_size = 101;
-  benchmark::IterationCount iter_count = 0;
-  while (state.KeepRunningBatch(batch_size)) {
-    iter_count += batch_size;
-  }
-  assert(state.iterations() == iter_count);
-}
-BENCHMARK(BM_KeepRunningBatch);
-
-void BM_RangedFor(benchmark::State& state) {
-  benchmark::IterationCount iter_count = 0;
-  for (auto _ : state) {
-    ++iter_count;
-  }
-  assert(iter_count == state.max_iterations);
-}
-BENCHMARK(BM_RangedFor);
-
-// Ensure that StateIterator provides all the necessary typedefs required to
-// instantiate std::iterator_traits.
-static_assert(std::is_same<
-  typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
-  typename benchmark::State::StateIterator::value_type>::value, "");
-
-BENCHMARK_MAIN();
diff --git a/benchmarks/thirdparty/benchmark/test/benchmark_gtest.cc b/benchmarks/thirdparty/benchmark/test/benchmark_gtest.cc
deleted file mode 100755
index 9557b20ec7..0000000000
--- a/benchmarks/thirdparty/benchmark/test/benchmark_gtest.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-#include <vector>
-
-#include "../src/benchmark_register.h"
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-
-namespace benchmark {
-namespace internal {
-namespace {
-
-TEST(AddRangeTest, Simple) {
-  std::vector<int> dst;
-  AddRange(&dst, 1, 2, 2);
-  EXPECT_THAT(dst, testing::ElementsAre(1, 2));
-}
-
-TEST(AddRangeTest, Simple64) {
-  std::vector<int64_t> dst;
-  AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2);
-  EXPECT_THAT(dst, testing::ElementsAre(1, 2));
-}
-
-TEST(AddRangeTest, Advanced) {
-  std::vector<int> dst;
-  AddRange(&dst, 5, 15, 2);
-  EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
-}
-
-TEST(AddRangeTest, Advanced64) {
-  std::vector<int64_t> dst;
-  AddRange(&dst, static_cast<int64_t>(5), static_cast<int64_t>(15), 2);
-  EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
-}
-
-TEST(AddRangeTest, FullRange8) {
-  std::vector<int8_t> dst;
-  AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8);
-  EXPECT_THAT(dst, testing::ElementsAre(1, 8, 64, 127));
-}
-
-TEST(AddRangeTest, FullRange64) {
-  std::vector<int64_t> dst;
-  AddRange(&dst, int64_t{1}, std::numeric_limits<int64_t>::max(), 1024);
-  EXPECT_THAT(
-      dst, testing::ElementsAre(1LL, 1024LL, 1048576LL, 1073741824LL,
-                                1099511627776LL, 1125899906842624LL,
-                                1152921504606846976LL, 9223372036854775807LL));
-}
-
-TEST(AddRangeTest, NegativeRanges) {
-  std::vector<int> dst;
-  AddRange(&dst, -8, 0, 2);
-  EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0));
-}
-
-TEST(AddRangeTest, StrictlyNegative) {
-  std::vector<int> dst;
-  AddRange(&dst, -8, -1, 2);
-  EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1));
-}
-
-TEST(AddRangeTest, SymmetricNegativeRanges) {
-  std::vector<int> dst;
-  AddRange(&dst, -8, 8, 2);
-  EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0, 1, 2, 4, 8));
-}
-
-TEST(AddRangeTest, SymmetricNegativeRangesOddMult) {
-  std::vector<int> dst;
-  AddRange(&dst, -30, 32, 5);
-  EXPECT_THAT(dst, testing::ElementsAre(-30, -25, -5, -1, 0, 1, 5, 25, 32));
-}
-
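// Taken together, the cases above pin down AddRange()'s contract: emit the
// lower bound, every power of the multiplier lying strictly between the
// bounds, and the upper bound; a range crossing zero additionally gets the
// mirrored negative powers plus -1, 0, 1. A hedged sketch of that rule for
// positive bounds (not the upstream implementation):
//
//   template <typename T>
//   void AddRangeSketch(std::vector<T>* dst, T lo, T hi, int mult) {
//     dst->push_back(lo);
//     for (T v = 1; v < hi; v *= mult)   // powers of the multiplier
//       if (v > lo) dst->push_back(v);
//     if (hi != lo) dst->push_back(hi);
//   }
//
//   // e.g. AddRange(&dst, 5, 15, 2) -> {5, 8, 15}, since 8 == 2^3 is the
//   // only power of two strictly between the endpoints.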
-TEST(AddRangeTest, NegativeRangesAsymmetric) { - std::vector dst; - AddRange(&dst, -3, 5, 2); - EXPECT_THAT(dst, testing::ElementsAre(-3, -2, -1, 0, 1, 2, 4, 5)); -} - -TEST(AddRangeTest, NegativeRangesLargeStep) { - // Always include -1, 0, 1 when crossing zero. - std::vector dst; - AddRange(&dst, -8, 8, 10); - EXPECT_THAT(dst, testing::ElementsAre(-8, -1, 0, 1, 8)); -} - -TEST(AddRangeTest, ZeroOnlyRange) { - std::vector dst; - AddRange(&dst, 0, 0, 2); - EXPECT_THAT(dst, testing::ElementsAre(0)); -} - -TEST(AddRangeTest, NegativeRange64) { - std::vector dst; - AddRange(&dst, -4, 4, 2); - EXPECT_THAT(dst, testing::ElementsAre(-4, -2, -1, 0, 1, 2, 4)); -} - -TEST(AddRangeTest, NegativeRangePreservesExistingOrder) { - // If elements already exist in the range, ensure we don't change - // their ordering by adding negative values. - std::vector dst = {1, 2, 3}; - AddRange(&dst, -2, 2, 2); - EXPECT_THAT(dst, testing::ElementsAre(1, 2, 3, -2, -1, 0, 1, 2)); -} - -TEST(AddRangeTest, FullNegativeRange64) { - std::vector dst; - const auto min = std::numeric_limits::min(); - const auto max = std::numeric_limits::max(); - AddRange(&dst, min, max, 1024); - EXPECT_THAT( - dst, testing::ElementsAreArray(std::vector{ - min, -1152921504606846976LL, -1125899906842624LL, - -1099511627776LL, -1073741824LL, -1048576LL, -1024LL, -1LL, 0LL, - 1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL, - 1125899906842624LL, 1152921504606846976LL, max})); -} - -TEST(AddRangeTest, Simple8) { - std::vector dst; - AddRange(&dst, 1, 8, 2); - EXPECT_THAT(dst, testing::ElementsAre(1, 2, 4, 8)); -} - -} // namespace -} // namespace internal -} // namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/test/benchmark_name_gtest.cc b/benchmarks/thirdparty/benchmark/test/benchmark_name_gtest.cc deleted file mode 100755 index afb401c1f5..0000000000 --- a/benchmarks/thirdparty/benchmark/test/benchmark_name_gtest.cc +++ /dev/null @@ -1,74 +0,0 @@ -#include "benchmark/benchmark.h" -#include "gtest/gtest.h" - -namespace { - -using namespace benchmark; -using namespace benchmark::internal; - -TEST(BenchmarkNameTest, Empty) { - const auto name = BenchmarkName(); - EXPECT_EQ(name.str(), std::string()); -} - -TEST(BenchmarkNameTest, FunctionName) { - auto name = BenchmarkName(); - name.function_name = "function_name"; - EXPECT_EQ(name.str(), "function_name"); -} - -TEST(BenchmarkNameTest, FunctionNameAndArgs) { - auto name = BenchmarkName(); - name.function_name = "function_name"; - name.args = "some_args:3/4/5"; - EXPECT_EQ(name.str(), "function_name/some_args:3/4/5"); -} - -TEST(BenchmarkNameTest, MinTime) { - auto name = BenchmarkName(); - name.function_name = "function_name"; - name.args = "some_args:3/4"; - name.min_time = "min_time:3.4s"; - EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_time:3.4s"); -} - -TEST(BenchmarkNameTest, Iterations) { - auto name = BenchmarkName(); - name.function_name = "function_name"; - name.min_time = "min_time:3.4s"; - name.iterations = "iterations:42"; - EXPECT_EQ(name.str(), "function_name/min_time:3.4s/iterations:42"); -} - -TEST(BenchmarkNameTest, Repetitions) { - auto name = BenchmarkName(); - name.function_name = "function_name"; - name.min_time = "min_time:3.4s"; - name.repetitions = "repetitions:24"; - EXPECT_EQ(name.str(), "function_name/min_time:3.4s/repetitions:24"); -} - -TEST(BenchmarkNameTest, TimeType) { - auto name = BenchmarkName(); - name.function_name = "function_name"; - name.min_time = "min_time:3.4s"; - name.time_type = "hammer_time"; - EXPECT_EQ(name.str(), 
"function_name/min_time:3.4s/hammer_time"); -} - -TEST(BenchmarkNameTest, Threads) { - auto name = BenchmarkName(); - name.function_name = "function_name"; - name.min_time = "min_time:3.4s"; - name.threads = "threads:256"; - EXPECT_EQ(name.str(), "function_name/min_time:3.4s/threads:256"); -} - -TEST(BenchmarkNameTest, TestEmptyFunctionName) { - auto name = BenchmarkName(); - name.args = "first:3/second:4"; - name.threads = "threads:22"; - EXPECT_EQ(name.str(), "first:3/second:4/threads:22"); -} - -} // end namespace diff --git a/benchmarks/thirdparty/benchmark/test/benchmark_test.cc b/benchmarks/thirdparty/benchmark/test/benchmark_test.cc deleted file mode 100755 index 3cd4f5565f..0000000000 --- a/benchmarks/thirdparty/benchmark/test/benchmark_test.cc +++ /dev/null @@ -1,245 +0,0 @@ -#include "benchmark/benchmark.h" - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if defined(__GNUC__) -#define BENCHMARK_NOINLINE __attribute__((noinline)) -#else -#define BENCHMARK_NOINLINE -#endif - -namespace { - -int BENCHMARK_NOINLINE Factorial(uint32_t n) { - return (n == 1) ? 1 : n * Factorial(n - 1); -} - -double CalculatePi(int depth) { - double pi = 0.0; - for (int i = 0; i < depth; ++i) { - double numerator = static_cast(((i % 2) * 2) - 1); - double denominator = static_cast((2 * i) - 1); - pi += numerator / denominator; - } - return (pi - 1.0) * 4; -} - -std::set ConstructRandomSet(int64_t size) { - std::set s; - for (int i = 0; i < size; ++i) s.insert(s.end(), i); - return s; -} - -std::mutex test_vector_mu; -std::vector* test_vector = nullptr; - -} // end namespace - -static void BM_Factorial(benchmark::State& state) { - int fac_42 = 0; - for (auto _ : state) fac_42 = Factorial(8); - // Prevent compiler optimizations - std::stringstream ss; - ss << fac_42; - state.SetLabel(ss.str()); -} -BENCHMARK(BM_Factorial); -BENCHMARK(BM_Factorial)->UseRealTime(); - -static void BM_CalculatePiRange(benchmark::State& state) { - double pi = 0.0; - for (auto _ : state) pi = CalculatePi(static_cast(state.range(0))); - std::stringstream ss; - ss << pi; - state.SetLabel(ss.str()); -} -BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024); - -static void BM_CalculatePi(benchmark::State& state) { - static const int depth = 1024; - for (auto _ : state) { - benchmark::DoNotOptimize(CalculatePi(static_cast(depth))); - } -} -BENCHMARK(BM_CalculatePi)->Threads(8); -BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32); -BENCHMARK(BM_CalculatePi)->ThreadPerCpu(); - -static void BM_SetInsert(benchmark::State& state) { - std::set data; - for (auto _ : state) { - state.PauseTiming(); - data = ConstructRandomSet(state.range(0)); - state.ResumeTiming(); - for (int j = 0; j < state.range(1); ++j) data.insert(rand()); - } - state.SetItemsProcessed(state.iterations() * state.range(1)); - state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int)); -} - -// Test many inserts at once to reduce the total iterations needed. Otherwise, the slower, -// non-timed part of each iteration will make the benchmark take forever. 
-BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {128, 512}});
-
-template <class Container,
-          typename ValueType = typename Container::value_type>
-static void BM_Sequential(benchmark::State& state) {
-  ValueType v = 42;
-  for (auto _ : state) {
-    Container c;
-    for (int64_t i = state.range(0); --i;) c.push_back(v);
-  }
-  const int64_t items_processed = state.iterations() * state.range(0);
-  state.SetItemsProcessed(items_processed);
-  state.SetBytesProcessed(items_processed * sizeof(v));
-}
-BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int)
-    ->Range(1 << 0, 1 << 10);
-BENCHMARK_TEMPLATE(BM_Sequential, std::list<int>)->Range(1 << 0, 1 << 10);
-// Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond.
-#ifdef BENCHMARK_HAS_CXX11
-BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512);
-#endif
-
-static void BM_StringCompare(benchmark::State& state) {
-  size_t len = static_cast<size_t>(state.range(0));
-  std::string s1(len, '-');
-  std::string s2(len, '-');
-  for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2));
-}
-BENCHMARK(BM_StringCompare)->Range(1, 1 << 20);
-
-static void BM_SetupTeardown(benchmark::State& state) {
-  if (state.thread_index == 0) {
-    // No need to lock test_vector_mu here as this is running single-threaded.
-    test_vector = new std::vector<int>();
-  }
-  int i = 0;
-  for (auto _ : state) {
-    std::lock_guard<std::mutex> l(test_vector_mu);
-    if (i % 2 == 0)
-      test_vector->push_back(i);
-    else
-      test_vector->pop_back();
-    ++i;
-  }
-  if (state.thread_index == 0) {
-    delete test_vector;
-  }
-}
-BENCHMARK(BM_SetupTeardown)->ThreadPerCpu();
-
-static void BM_LongTest(benchmark::State& state) {
-  double tracker = 0.0;
-  for (auto _ : state) {
-    for (int i = 0; i < state.range(0); ++i)
-      benchmark::DoNotOptimize(tracker += i);
-  }
-}
-BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28);
-
-static void BM_ParallelMemset(benchmark::State& state) {
-  int64_t size = state.range(0) / static_cast<int64_t>(sizeof(int));
-  int thread_size = static_cast<int>(size) / state.threads;
-  int from = thread_size * state.thread_index;
-  int to = from + thread_size;
-
-  if (state.thread_index == 0) {
-    test_vector = new std::vector<int>(static_cast<size_t>(size));
-  }
-
-  for (auto _ : state) {
-    for (int i = from; i < to; i++) {
-      // No need to lock test_vector_mu as ranges
-      // do not overlap between threads.
-      benchmark::DoNotOptimize(test_vector->at(i) = 1);
-    }
-  }
-
-  if (state.thread_index == 0) {
-    delete test_vector;
-  }
-}
-BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4);
-
-static void BM_ManualTiming(benchmark::State& state) {
-  int64_t slept_for = 0;
-  int64_t microseconds = state.range(0);
-  std::chrono::duration<double, std::micro> sleep_duration{
-      static_cast<double>(microseconds)};
-
-  for (auto _ : state) {
-    auto start = std::chrono::high_resolution_clock::now();
-    // Simulate some useful workload with a sleep
-    std::this_thread::sleep_for(
-        std::chrono::duration_cast<std::chrono::microseconds>(sleep_duration));
-    auto end = std::chrono::high_resolution_clock::now();
-
-    auto elapsed =
-        std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
-
-    state.SetIterationTime(elapsed.count());
-    slept_for += microseconds;
  }
-  state.SetItemsProcessed(slept_for);
-}
-BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseRealTime();
-BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime();
-
-#ifdef BENCHMARK_HAS_CXX11
-
-template <class... Args>
-void BM_with_args(benchmark::State& state, Args&&...)
{ - for (auto _ : state) { - } -} -BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44); -BENCHMARK_CAPTURE(BM_with_args, string_and_pair_test, std::string("abc"), - std::pair(42, 3.8)); - -void BM_non_template_args(benchmark::State& state, int, double) { - while(state.KeepRunning()) {} -} -BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0); - -#endif // BENCHMARK_HAS_CXX11 - -static void BM_DenseThreadRanges(benchmark::State& st) { - switch (st.range(0)) { - case 1: - assert(st.threads == 1 || st.threads == 2 || st.threads == 3); - break; - case 2: - assert(st.threads == 1 || st.threads == 3 || st.threads == 4); - break; - case 3: - assert(st.threads == 5 || st.threads == 8 || st.threads == 11 || - st.threads == 14); - break; - default: - assert(false && "Invalid test case number"); - } - while (st.KeepRunning()) { - } -} -BENCHMARK(BM_DenseThreadRanges)->Arg(1)->DenseThreadRange(1, 3); -BENCHMARK(BM_DenseThreadRanges)->Arg(2)->DenseThreadRange(1, 4, 2); -BENCHMARK(BM_DenseThreadRanges)->Arg(3)->DenseThreadRange(5, 14, 3); - -BENCHMARK_MAIN(); diff --git a/benchmarks/thirdparty/benchmark/test/clobber_memory_assembly_test.cc b/benchmarks/thirdparty/benchmark/test/clobber_memory_assembly_test.cc deleted file mode 100755 index f41911a39c..0000000000 --- a/benchmarks/thirdparty/benchmark/test/clobber_memory_assembly_test.cc +++ /dev/null @@ -1,64 +0,0 @@ -#include - -#ifdef __clang__ -#pragma clang diagnostic ignored "-Wreturn-type" -#endif - -extern "C" { - -extern int ExternInt; -extern int ExternInt2; -extern int ExternInt3; - -} - -// CHECK-LABEL: test_basic: -extern "C" void test_basic() { - int x; - benchmark::DoNotOptimize(&x); - x = 101; - benchmark::ClobberMemory(); - // CHECK: leaq [[DEST:[^,]+]], %rax - // CHECK: movl $101, [[DEST]] - // CHECK: ret -} - -// CHECK-LABEL: test_redundant_store: -extern "C" void test_redundant_store() { - ExternInt = 3; - benchmark::ClobberMemory(); - ExternInt = 51; - // CHECK-DAG: ExternInt - // CHECK-DAG: movl $3 - // CHECK: movl $51 -} - -// CHECK-LABEL: test_redundant_read: -extern "C" void test_redundant_read() { - int x; - benchmark::DoNotOptimize(&x); - x = ExternInt; - benchmark::ClobberMemory(); - x = ExternInt2; - // CHECK: leaq [[DEST:[^,]+]], %rax - // CHECK: ExternInt(%rip) - // CHECK: movl %eax, [[DEST]] - // CHECK-NOT: ExternInt2 - // CHECK: ret -} - -// CHECK-LABEL: test_redundant_read2: -extern "C" void test_redundant_read2() { - int x; - benchmark::DoNotOptimize(&x); - x = ExternInt; - benchmark::ClobberMemory(); - x = ExternInt2; - benchmark::ClobberMemory(); - // CHECK: leaq [[DEST:[^,]+]], %rax - // CHECK: ExternInt(%rip) - // CHECK: movl %eax, [[DEST]] - // CHECK: ExternInt2(%rip) - // CHECK: movl %eax, [[DEST]] - // CHECK: ret -} diff --git a/benchmarks/thirdparty/benchmark/test/commandlineflags_gtest.cc b/benchmarks/thirdparty/benchmark/test/commandlineflags_gtest.cc deleted file mode 100755 index 656020f2ec..0000000000 --- a/benchmarks/thirdparty/benchmark/test/commandlineflags_gtest.cc +++ /dev/null @@ -1,201 +0,0 @@ -#include - -#include "../src/commandlineflags.h" -#include "../src/internal_macros.h" -#include "gtest/gtest.h" - -namespace benchmark { -namespace { - -#if defined(BENCHMARK_OS_WINDOWS) -int setenv(const char* name, const char* value, int overwrite) { - if (!overwrite) { - // NOTE: getenv_s is far superior but not available under mingw. 
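// This shim maps POSIX setenv()/unsetenv() onto the Windows CRT:
// _putenv_s(name, "") removes the variable, which is what unsetenv() relies
// on, and the getenv() probe approximates setenv()'s overwrite flag (the
// tests below always pass overwrite=1, so only the _putenv_s path is
// exercised). Hedged usage sketch:
//
//   setenv("IN_ENV", "1", /*overwrite=*/0);  // probes for an existing value
//   setenv("IN_ENV", "1", /*overwrite=*/1);  // always writes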
- char* env_value = getenv(name); - if (env_value == nullptr) { - return -1; - } - } - return _putenv_s(name, value); -} - -int unsetenv(const char* name) { - return _putenv_s(name, ""); -} - -#endif // BENCHMARK_OS_WINDOWS - -TEST(BoolFromEnv, Default) { - ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0); - EXPECT_EQ(BoolFromEnv("not_in_env", true), true); -} - -TEST(BoolFromEnv, False) { - ASSERT_EQ(setenv("IN_ENV", "0", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", true), false); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "N", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", true), false); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "n", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", true), false); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "NO", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", true), false); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "No", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", true), false); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "no", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", true), false); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "F", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", true), false); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "f", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", true), false); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "FALSE", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", true), false); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "False", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", true), false); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "false", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", true), false); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "OFF", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", true), false); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "Off", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", true), false); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "off", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", true), false); - unsetenv("IN_ENV"); -} - -TEST(BoolFromEnv, True) { - ASSERT_EQ(setenv("IN_ENV", "1", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", false), true); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "Y", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", false), true); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "y", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", false), true); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "YES", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", false), true); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "Yes", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", false), true); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "yes", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", false), true); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "T", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", false), true); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "t", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", false), true); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "TRUE", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", false), true); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "True", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", false), true); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "true", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", false), true); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "ON", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", false), true); - unsetenv("IN_ENV"); - - ASSERT_EQ(setenv("IN_ENV", "On", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", false), true); - unsetenv("IN_ENV"); - 
- ASSERT_EQ(setenv("IN_ENV", "on", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", false), true); - unsetenv("IN_ENV"); - -#ifndef BENCHMARK_OS_WINDOWS - ASSERT_EQ(setenv("IN_ENV", "", 1), 0); - EXPECT_EQ(BoolFromEnv("in_env", false), true); - unsetenv("IN_ENV"); -#endif -} - -TEST(Int32FromEnv, NotInEnv) { - ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0); - EXPECT_EQ(Int32FromEnv("not_in_env", 42), 42); -} - -TEST(Int32FromEnv, InvalidInteger) { - ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0); - EXPECT_EQ(Int32FromEnv("in_env", 42), 42); - unsetenv("IN_ENV"); -} - -TEST(Int32FromEnv, ValidInteger) { - ASSERT_EQ(setenv("IN_ENV", "42", 1), 0); - EXPECT_EQ(Int32FromEnv("in_env", 64), 42); - unsetenv("IN_ENV"); -} - -TEST(DoubleFromEnv, NotInEnv) { - ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0); - EXPECT_EQ(DoubleFromEnv("not_in_env", 0.51), 0.51); -} - -TEST(DoubleFromEnv, InvalidReal) { - ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0); - EXPECT_EQ(DoubleFromEnv("in_env", 0.51), 0.51); - unsetenv("IN_ENV"); -} - -TEST(DoubleFromEnv, ValidReal) { - ASSERT_EQ(setenv("IN_ENV", "0.51", 1), 0); - EXPECT_EQ(DoubleFromEnv("in_env", 0.71), 0.51); - unsetenv("IN_ENV"); -} - -TEST(StringFromEnv, Default) { - ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0); - EXPECT_STREQ(StringFromEnv("not_in_env", "foo"), "foo"); -} - -TEST(StringFromEnv, Valid) { - ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0); - EXPECT_STREQ(StringFromEnv("in_env", "bar"), "foo"); - unsetenv("IN_ENV"); -} - -} // namespace -} // namespace benchmark diff --git a/benchmarks/thirdparty/benchmark/test/complexity_test.cc b/benchmarks/thirdparty/benchmark/test/complexity_test.cc deleted file mode 100755 index 5681fdcf34..0000000000 --- a/benchmarks/thirdparty/benchmark/test/complexity_test.cc +++ /dev/null @@ -1,213 +0,0 @@ -#undef NDEBUG -#include -#include -#include -#include -#include -#include "benchmark/benchmark.h" -#include "output_test.h" - -namespace { - -#define ADD_COMPLEXITY_CASES(...) \ - int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__) - -int AddComplexityTest(std::string test_name, std::string big_o_test_name, - std::string rms_test_name, std::string big_o) { - SetSubstitutions({{"%name", test_name}, - {"%bigo_name", big_o_test_name}, - {"%rms_name", rms_test_name}, - {"%bigo_str", "[ ]* %float " + big_o}, - {"%bigo", big_o}, - {"%rms", "[ ]*[0-9]+ %"}}); - AddCases( - TC_ConsoleOut, - {{"^%bigo_name %bigo_str %bigo_str[ ]*$"}, - {"^%bigo_name", MR_Not}, // Assert we we didn't only matched a name. 
- {"^%rms_name %rms %rms[ ]*$", MR_Next}}); - AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"}, - {"\"run_name\": \"%name\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": %int,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"BigO\",$", MR_Next}, - {"\"cpu_coefficient\": %float,$", MR_Next}, - {"\"real_coefficient\": %float,$", MR_Next}, - {"\"big_o\": \"%bigo\",$", MR_Next}, - {"\"time_unit\": \"ns\"$", MR_Next}, - {"}", MR_Next}, - {"\"name\": \"%rms_name\",$"}, - {"\"run_name\": \"%name\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": %int,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"RMS\",$", MR_Next}, - {"\"rms\": %float$", MR_Next}, - {"}", MR_Next}}); - AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"}, - {"^\"%bigo_name\"", MR_Not}, - {"^\"%rms_name\",,%float,%float,,,,,,$", MR_Next}}); - return 0; -} - -} // end namespace - -// ========================================================================= // -// --------------------------- Testing BigO O(1) --------------------------- // -// ========================================================================= // - -void BM_Complexity_O1(benchmark::State& state) { - for (auto _ : state) { - for (int i = 0; i < 1024; ++i) { - benchmark::DoNotOptimize(&i); - } - } - state.SetComplexityN(state.range(0)); -} -BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1); -BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(); -BENCHMARK(BM_Complexity_O1) - ->Range(1, 1 << 18) - ->Complexity([](benchmark::IterationCount) { return 1.0; }); - -const char *one_test_name = "BM_Complexity_O1"; -const char *big_o_1_test_name = "BM_Complexity_O1_BigO"; -const char *rms_o_1_test_name = "BM_Complexity_O1_RMS"; -const char *enum_big_o_1 = "\\([0-9]+\\)"; -// FIXME: Tolerate both '(1)' and 'lgN' as output when the complexity is auto -// deduced. 
-// See https://github.com/google/benchmark/issues/272 -const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)"; -const char *lambda_big_o_1 = "f\\(N\\)"; - -// Add enum tests -ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, - enum_big_o_1); - -// Add auto enum tests -ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, - auto_big_o_1); - -// Add lambda tests -ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, - lambda_big_o_1); - -// ========================================================================= // -// --------------------------- Testing BigO O(N) --------------------------- // -// ========================================================================= // - -std::vector ConstructRandomVector(int64_t size) { - std::vector v; - v.reserve(static_cast(size)); - for (int i = 0; i < size; ++i) { - v.push_back(static_cast(std::rand() % size)); - } - return v; -} - -void BM_Complexity_O_N(benchmark::State& state) { - auto v = ConstructRandomVector(state.range(0)); - // Test worst case scenario (item not in vector) - const int64_t item_not_in_vector = state.range(0) * 2; - for (auto _ : state) { - benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector)); - } - state.SetComplexityN(state.range(0)); -} -BENCHMARK(BM_Complexity_O_N) - ->RangeMultiplier(2) - ->Range(1 << 10, 1 << 16) - ->Complexity(benchmark::oN); -BENCHMARK(BM_Complexity_O_N) - ->RangeMultiplier(2) - ->Range(1 << 10, 1 << 16) - ->Complexity([](benchmark::IterationCount n) -> double { - return static_cast(n); - }); -BENCHMARK(BM_Complexity_O_N) - ->RangeMultiplier(2) - ->Range(1 << 10, 1 << 16) - ->Complexity(); - -const char *n_test_name = "BM_Complexity_O_N"; -const char *big_o_n_test_name = "BM_Complexity_O_N_BigO"; -const char *rms_o_n_test_name = "BM_Complexity_O_N_RMS"; -const char *enum_auto_big_o_n = "N"; -const char *lambda_big_o_n = "f\\(N\\)"; - -// Add enum tests -ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, - enum_auto_big_o_n); - -// Add lambda tests -ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, - lambda_big_o_n); - -// ========================================================================= // -// ------------------------- Testing BigO O(N*lgN) ------------------------- // -// ========================================================================= // - -static void BM_Complexity_O_N_log_N(benchmark::State& state) { - auto v = ConstructRandomVector(state.range(0)); - for (auto _ : state) { - std::sort(v.begin(), v.end()); - } - state.SetComplexityN(state.range(0)); -} -static const double kLog2E = 1.44269504088896340736; -BENCHMARK(BM_Complexity_O_N_log_N) - ->RangeMultiplier(2) - ->Range(1 << 10, 1 << 16) - ->Complexity(benchmark::oNLogN); -BENCHMARK(BM_Complexity_O_N_log_N) - ->RangeMultiplier(2) - ->Range(1 << 10, 1 << 16) - ->Complexity([](benchmark::IterationCount n) { - return kLog2E * n * log(static_cast(n)); - }); -BENCHMARK(BM_Complexity_O_N_log_N) - ->RangeMultiplier(2) - ->Range(1 << 10, 1 << 16) - ->Complexity(); - -const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N"; -const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO"; -const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS"; -const char *enum_auto_big_o_n_lg_n = "NlgN"; -const char *lambda_big_o_n_lg_n = "f\\(N\\)"; - -// Add enum tests -ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, - rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n); - -// Add lambda tests 
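// Background for the regex cases here: with Complexity(), the library fits
// the measured times against coef * f(N) by least squares and reports the
// fitted coefficient together with a normalized root-mean-square error (the
// %rms column matched above). When Complexity() is called with no argument,
// every candidate f(N) is fitted and the one with the smallest RMS wins,
// which is why an O(1) run can legitimately come out labelled lgN (the FIXME
// above). Roughly (a sketch of the fit, not the library's exact code):
//
//   // coef = sum(time_i * f(N_i)) / sum(f(N_i)^2)        least-squares slope
//   // rms  = sqrt(mean((time_i - coef * f(N_i))^2)) / mean(time_i)
//
// The lambda registration follows.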
-ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, - rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n); - -// ========================================================================= // -// -------- Testing formatting of Complexity with captured args ------------ // -// ========================================================================= // - -void BM_ComplexityCaptureArgs(benchmark::State& state, int n) { - for (auto _ : state) { - // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); - } - state.SetComplexityN(n); -} - -BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100) - ->Complexity(benchmark::oN) - ->Ranges({{1, 2}, {3, 4}}); - -const std::string complexity_capture_name = - "BM_ComplexityCaptureArgs/capture_test"; - -ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO", - complexity_capture_name + "_RMS", "N"); - -// ========================================================================= // -// --------------------------- TEST CASES END ------------------------------ // -// ========================================================================= // - -int main(int argc, char *argv[]) { RunOutputTests(argc, argv); } diff --git a/benchmarks/thirdparty/benchmark/test/cxx03_test.cc b/benchmarks/thirdparty/benchmark/test/cxx03_test.cc deleted file mode 100755 index c4c9a52273..0000000000 --- a/benchmarks/thirdparty/benchmark/test/cxx03_test.cc +++ /dev/null @@ -1,63 +0,0 @@ -#undef NDEBUG -#include -#include - -#include "benchmark/benchmark.h" - -#if __cplusplus >= 201103L -#error C++11 or greater detected. Should be C++03. -#endif - -#ifdef BENCHMARK_HAS_CXX11 -#error C++11 or greater detected by the library. BENCHMARK_HAS_CXX11 is defined. -#endif - -void BM_empty(benchmark::State& state) { - while (state.KeepRunning()) { - volatile benchmark::IterationCount x = state.iterations(); - ((void)x); - } -} -BENCHMARK(BM_empty); - -// The new C++11 interface for args/ranges requires initializer list support. -// Therefore we provide the old interface to support C++03. 
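// For reference, the C++11 spellings these calls map to (based on the current
// library API, not part of this file):
//
//   ->ArgPair(1, 2)         ~ ->Args({1, 2})
//   ->RangePair(5, 5, 6, 6) ~ ->Ranges({{5, 5}, {6, 6}})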
-void BM_old_arg_range_interface(benchmark::State& state) { - assert((state.range(0) == 1 && state.range(1) == 2) || - (state.range(0) == 5 && state.range(1) == 6)); - while (state.KeepRunning()) { - } -} -BENCHMARK(BM_old_arg_range_interface)->ArgPair(1, 2)->RangePair(5, 5, 6, 6); - -template -void BM_template2(benchmark::State& state) { - BM_empty(state); -} -BENCHMARK_TEMPLATE2(BM_template2, int, long); - -template -void BM_template1(benchmark::State& state) { - BM_empty(state); -} -BENCHMARK_TEMPLATE(BM_template1, long); -BENCHMARK_TEMPLATE1(BM_template1, int); - -template -struct BM_Fixture : public ::benchmark::Fixture { -}; - -BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State& state) { - BM_empty(state); -} -BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State& state) { - BM_empty(state); -} - -void BM_counters(benchmark::State& state) { - BM_empty(state); - state.counters["Foo"] = 2; -} -BENCHMARK(BM_counters); - -BENCHMARK_MAIN(); diff --git a/benchmarks/thirdparty/benchmark/test/diagnostics_test.cc b/benchmarks/thirdparty/benchmark/test/diagnostics_test.cc deleted file mode 100755 index dd64a33655..0000000000 --- a/benchmarks/thirdparty/benchmark/test/diagnostics_test.cc +++ /dev/null @@ -1,80 +0,0 @@ -// Testing: -// State::PauseTiming() -// State::ResumeTiming() -// Test that CHECK's within these function diagnose when they are called -// outside of the KeepRunning() loop. -// -// NOTE: Users should NOT include or use src/check.h. This is only done in -// order to test library internals. - -#include -#include - -#include "../src/check.h" -#include "benchmark/benchmark.h" - -#if defined(__GNUC__) && !defined(__EXCEPTIONS) -#define TEST_HAS_NO_EXCEPTIONS -#endif - -void TestHandler() { -#ifndef TEST_HAS_NO_EXCEPTIONS - throw std::logic_error(""); -#else - std::abort(); -#endif -} - -void try_invalid_pause_resume(benchmark::State& state) { -#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS) - try { - state.PauseTiming(); - std::abort(); - } catch (std::logic_error const&) { - } - try { - state.ResumeTiming(); - std::abort(); - } catch (std::logic_error const&) { - } -#else - (void)state; // avoid unused warning -#endif -} - -void BM_diagnostic_test(benchmark::State& state) { - static bool called_once = false; - - if (called_once == false) try_invalid_pause_resume(state); - - for (auto _ : state) { - benchmark::DoNotOptimize(state.iterations()); - } - - if (called_once == false) try_invalid_pause_resume(state); - - called_once = true; -} -BENCHMARK(BM_diagnostic_test); - - -void BM_diagnostic_test_keep_running(benchmark::State& state) { - static bool called_once = false; - - if (called_once == false) try_invalid_pause_resume(state); - - while(state.KeepRunning()) { - benchmark::DoNotOptimize(state.iterations()); - } - - if (called_once == false) try_invalid_pause_resume(state); - - called_once = true; -} -BENCHMARK(BM_diagnostic_test_keep_running); - -int main(int argc, char* argv[]) { - benchmark::internal::GetAbortHandler() = &TestHandler; - benchmark::Initialize(&argc, argv); - benchmark::RunSpecifiedBenchmarks(); -} diff --git a/benchmarks/thirdparty/benchmark/test/display_aggregates_only_test.cc b/benchmarks/thirdparty/benchmark/test/display_aggregates_only_test.cc deleted file mode 100755 index 3c36d3f03c..0000000000 --- a/benchmarks/thirdparty/benchmark/test/display_aggregates_only_test.cc +++ /dev/null @@ -1,43 +0,0 @@ - -#undef NDEBUG -#include -#include - -#include "benchmark/benchmark.h" 
-#include "output_test.h" - -// Ok this test is super ugly. We want to check what happens with the file -// reporter in the presence of DisplayAggregatesOnly(). -// We do not care about console output, the normal tests check that already. - -void BM_SummaryRepeat(benchmark::State& state) { - for (auto _ : state) { - } -} -BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->DisplayAggregatesOnly(); - -int main(int argc, char* argv[]) { - const std::string output = GetFileReporterOutput(argc, argv); - - if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 6 || - SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3\"") != 3 || - SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 || - SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != - 1 || - SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") != - 1) { - std::cout << "Precondition mismatch. Expected to only find 6 " - "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n" - "\"name\": \"BM_SummaryRepeat/repeats:3\", " - "\"name\": \"BM_SummaryRepeat/repeats:3\", " - "\"name\": \"BM_SummaryRepeat/repeats:3\", " - "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", " - "\"name\": \"BM_SummaryRepeat/repeats:3_median\", " - "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire " - "output:\n"; - std::cout << output; - return 1; - } - - return 0; -} diff --git a/benchmarks/thirdparty/benchmark/test/donotoptimize_assembly_test.cc b/benchmarks/thirdparty/benchmark/test/donotoptimize_assembly_test.cc deleted file mode 100755 index d4b0bab70e..0000000000 --- a/benchmarks/thirdparty/benchmark/test/donotoptimize_assembly_test.cc +++ /dev/null @@ -1,163 +0,0 @@ -#include - -#ifdef __clang__ -#pragma clang diagnostic ignored "-Wreturn-type" -#endif - -extern "C" { - -extern int ExternInt; -extern int ExternInt2; -extern int ExternInt3; - -inline int Add42(int x) { return x + 42; } - -struct NotTriviallyCopyable { - NotTriviallyCopyable(); - explicit NotTriviallyCopyable(int x) : value(x) {} - NotTriviallyCopyable(NotTriviallyCopyable const&); - int value; -}; - -struct Large { - int value; - int data[2]; -}; - -} -// CHECK-LABEL: test_with_rvalue: -extern "C" void test_with_rvalue() { - benchmark::DoNotOptimize(Add42(0)); - // CHECK: movl $42, %eax - // CHECK: ret -} - -// CHECK-LABEL: test_with_large_rvalue: -extern "C" void test_with_large_rvalue() { - benchmark::DoNotOptimize(Large{ExternInt, {ExternInt, ExternInt}}); - // CHECK: ExternInt(%rip) - // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]] - // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) - // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) - // CHECK: ret -} - -// CHECK-LABEL: test_with_non_trivial_rvalue: -extern "C" void test_with_non_trivial_rvalue() { - benchmark::DoNotOptimize(NotTriviallyCopyable(ExternInt)); - // CHECK: mov{{l|q}} ExternInt(%rip) - // CHECK: ret -} - -// CHECK-LABEL: test_with_lvalue: -extern "C" void test_with_lvalue() { - int x = 101; - benchmark::DoNotOptimize(x); - // CHECK-GNU: movl $101, %eax - // CHECK-CLANG: movl $101, -{{[0-9]+}}(%[[REG:[a-z]+]]) - // CHECK: ret -} - -// CHECK-LABEL: test_with_large_lvalue: -extern "C" void test_with_large_lvalue() { - Large L{ExternInt, {ExternInt, ExternInt}}; - benchmark::DoNotOptimize(L); - // CHECK: ExternInt(%rip) - // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]) - // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) - // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) - // CHECK: ret -} - -// CHECK-LABEL: test_with_non_trivial_lvalue: -extern "C" void 
test_with_non_trivial_lvalue() { - NotTriviallyCopyable NTC(ExternInt); - benchmark::DoNotOptimize(NTC); - // CHECK: ExternInt(%rip) - // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]) - // CHECK: ret -} - -// CHECK-LABEL: test_with_const_lvalue: -extern "C" void test_with_const_lvalue() { - const int x = 123; - benchmark::DoNotOptimize(x); - // CHECK: movl $123, %eax - // CHECK: ret -} - -// CHECK-LABEL: test_with_large_const_lvalue: -extern "C" void test_with_large_const_lvalue() { - const Large L{ExternInt, {ExternInt, ExternInt}}; - benchmark::DoNotOptimize(L); - // CHECK: ExternInt(%rip) - // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]) - // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) - // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) - // CHECK: ret -} - -// CHECK-LABEL: test_with_non_trivial_const_lvalue: -extern "C" void test_with_non_trivial_const_lvalue() { - const NotTriviallyCopyable Obj(ExternInt); - benchmark::DoNotOptimize(Obj); - // CHECK: mov{{q|l}} ExternInt(%rip) - // CHECK: ret -} - -// CHECK-LABEL: test_div_by_two: -extern "C" int test_div_by_two(int input) { - int divisor = 2; - benchmark::DoNotOptimize(divisor); - return input / divisor; - // CHECK: movl $2, [[DEST:.*]] - // CHECK: idivl [[DEST]] - // CHECK: ret -} - -// CHECK-LABEL: test_inc_integer: -extern "C" int test_inc_integer() { - int x = 0; - for (int i=0; i < 5; ++i) - benchmark::DoNotOptimize(++x); - // CHECK: movl $1, [[DEST:.*]] - // CHECK: {{(addl \$1,|incl)}} [[DEST]] - // CHECK: {{(addl \$1,|incl)}} [[DEST]] - // CHECK: {{(addl \$1,|incl)}} [[DEST]] - // CHECK: {{(addl \$1,|incl)}} [[DEST]] - // CHECK-CLANG: movl [[DEST]], %eax - // CHECK: ret - return x; -} - -// CHECK-LABEL: test_pointer_rvalue -extern "C" void test_pointer_rvalue() { - // CHECK: movl $42, [[DEST:.*]] - // CHECK: leaq [[DEST]], %rax - // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]]) - // CHECK: ret - int x = 42; - benchmark::DoNotOptimize(&x); -} - -// CHECK-LABEL: test_pointer_const_lvalue: -extern "C" void test_pointer_const_lvalue() { - // CHECK: movl $42, [[DEST:.*]] - // CHECK: leaq [[DEST]], %rax - // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]]) - // CHECK: ret - int x = 42; - int * const xp = &x; - benchmark::DoNotOptimize(xp); -} - -// CHECK-LABEL: test_pointer_lvalue: -extern "C" void test_pointer_lvalue() { - // CHECK: movl $42, [[DEST:.*]] - // CHECK: leaq [[DEST]], %rax - // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z+]+]]) - // CHECK: ret - int x = 42; - int *xp = &x; - benchmark::DoNotOptimize(xp); -} diff --git a/benchmarks/thirdparty/benchmark/test/donotoptimize_test.cc b/benchmarks/thirdparty/benchmark/test/donotoptimize_test.cc deleted file mode 100755 index 2ce92d1c72..0000000000 --- a/benchmarks/thirdparty/benchmark/test/donotoptimize_test.cc +++ /dev/null @@ -1,52 +0,0 @@ -#include "benchmark/benchmark.h" - -#include - -namespace { -#if defined(__GNUC__) -std::uint64_t double_up(const std::uint64_t x) __attribute__((const)); -#endif -std::uint64_t double_up(const std::uint64_t x) { return x * 2; } -} - -// Using DoNotOptimize on types like BitRef seem to cause a lot of problems -// with the inline assembly on both GCC and Clang. 
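// Background for the comment above (a sketch of the usual technique, not a
// quote of the library): DoNotOptimize() hands its argument to an empty
// inline-asm block so the optimizer must materialize the value, roughly:
//
//   template <class Tp>
//   inline void DoNotOptimizeSketch(Tp& value) {
//     asm volatile("" : "+r,m"(value) : : "memory");  // value "escapes" here
//   }
//
// Proxy-reference types such as BitRef below are neither a plain register
// value nor a simple lvalue in memory, which is what the "r"/"m" constraints
// expect; hence the compilation problems mentioned above.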
-struct BitRef { - int index; - unsigned char &byte; - -public: - static BitRef Make() { - static unsigned char arr[2] = {}; - BitRef b(1, arr[0]); - return b; - } -private: - BitRef(int i, unsigned char& b) : index(i), byte(b) {} -}; - -int main(int, char*[]) { - // this test verifies compilation of DoNotOptimize() for some types - - char buffer8[8] = ""; - benchmark::DoNotOptimize(buffer8); - - char buffer20[20] = ""; - benchmark::DoNotOptimize(buffer20); - - char buffer1024[1024] = ""; - benchmark::DoNotOptimize(buffer1024); - benchmark::DoNotOptimize(&buffer1024[0]); - - int x = 123; - benchmark::DoNotOptimize(x); - benchmark::DoNotOptimize(&x); - benchmark::DoNotOptimize(x += 42); - - benchmark::DoNotOptimize(double_up(x)); - - // These tests are to e - benchmark::DoNotOptimize(BitRef::Make()); - BitRef lval = BitRef::Make(); - benchmark::DoNotOptimize(lval); -} diff --git a/benchmarks/thirdparty/benchmark/test/filter_test.cc b/benchmarks/thirdparty/benchmark/test/filter_test.cc deleted file mode 100755 index 0e27065c15..0000000000 --- a/benchmarks/thirdparty/benchmark/test/filter_test.cc +++ /dev/null @@ -1,104 +0,0 @@ -#include "benchmark/benchmark.h" - -#include -#include -#include -#include - -#include -#include -#include -#include - -namespace { - -class TestReporter : public benchmark::ConsoleReporter { - public: - virtual bool ReportContext(const Context& context) { - return ConsoleReporter::ReportContext(context); - }; - - virtual void ReportRuns(const std::vector& report) { - ++count_; - ConsoleReporter::ReportRuns(report); - }; - - TestReporter() : count_(0) {} - - virtual ~TestReporter() {} - - size_t GetCount() const { return count_; } - - private: - mutable size_t count_; -}; - -} // end namespace - -static void NoPrefix(benchmark::State& state) { - for (auto _ : state) { - } -} -BENCHMARK(NoPrefix); - -static void BM_Foo(benchmark::State& state) { - for (auto _ : state) { - } -} -BENCHMARK(BM_Foo); - -static void BM_Bar(benchmark::State& state) { - for (auto _ : state) { - } -} -BENCHMARK(BM_Bar); - -static void BM_FooBar(benchmark::State& state) { - for (auto _ : state) { - } -} -BENCHMARK(BM_FooBar); - -static void BM_FooBa(benchmark::State& state) { - for (auto _ : state) { - } -} -BENCHMARK(BM_FooBa); - -int main(int argc, char **argv) { - bool list_only = false; - for (int i = 0; i < argc; ++i) - list_only |= std::string(argv[i]).find("--benchmark_list_tests") != - std::string::npos; - - benchmark::Initialize(&argc, argv); - - TestReporter test_reporter; - const size_t returned_count = - benchmark::RunSpecifiedBenchmarks(&test_reporter); - - if (argc == 2) { - // Make sure we ran all of the tests - std::stringstream ss(argv[1]); - size_t expected_return; - ss >> expected_return; - - if (returned_count != expected_return) { - std::cerr << "ERROR: Expected " << expected_return - << " tests to match the filter but returned_count = " - << returned_count << std::endl; - return -1; - } - - const size_t expected_reports = list_only ? 
0 : expected_return; - const size_t reports_count = test_reporter.GetCount(); - if (reports_count != expected_reports) { - std::cerr << "ERROR: Expected " << expected_reports - << " tests to be run but reported_count = " << reports_count - << std::endl; - return -1; - } - } - - return 0; -} diff --git a/benchmarks/thirdparty/benchmark/test/fixture_test.cc b/benchmarks/thirdparty/benchmark/test/fixture_test.cc deleted file mode 100755 index 1462b10f02..0000000000 --- a/benchmarks/thirdparty/benchmark/test/fixture_test.cc +++ /dev/null @@ -1,49 +0,0 @@ - -#include "benchmark/benchmark.h" - -#include -#include - -class MyFixture : public ::benchmark::Fixture { - public: - void SetUp(const ::benchmark::State& state) { - if (state.thread_index == 0) { - assert(data.get() == nullptr); - data.reset(new int(42)); - } - } - - void TearDown(const ::benchmark::State& state) { - if (state.thread_index == 0) { - assert(data.get() != nullptr); - data.reset(); - } - } - - ~MyFixture() { assert(data == nullptr); } - - std::unique_ptr data; -}; - -BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) { - assert(data.get() != nullptr); - assert(*data == 42); - for (auto _ : st) { - } -} - -BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) { - if (st.thread_index == 0) { - assert(data.get() != nullptr); - assert(*data == 42); - } - for (auto _ : st) { - assert(data.get() != nullptr); - assert(*data == 42); - } - st.SetItemsProcessed(st.range(0)); -} -BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42); -BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42)->ThreadPerCpu(); - -BENCHMARK_MAIN(); diff --git a/benchmarks/thirdparty/benchmark/test/internal_threading_test.cc b/benchmarks/thirdparty/benchmark/test/internal_threading_test.cc deleted file mode 100755 index 039d7c14a8..0000000000 --- a/benchmarks/thirdparty/benchmark/test/internal_threading_test.cc +++ /dev/null @@ -1,184 +0,0 @@ - -#undef NDEBUG - -#include -#include -#include "../src/timers.h" -#include "benchmark/benchmark.h" -#include "output_test.h" - -static const std::chrono::duration time_frame(50); -static const double time_frame_in_sec( - std::chrono::duration_cast>>( - time_frame) - .count()); - -void MyBusySpinwait() { - const auto start = benchmark::ChronoClockNow(); - - while (true) { - const auto now = benchmark::ChronoClockNow(); - const auto elapsed = now - start; - - if (std::chrono::duration(elapsed) >= - time_frame) - return; - } -} - -// ========================================================================= // -// --------------------------- TEST CASES BEGIN ---------------------------- // -// ========================================================================= // - -// ========================================================================= // -// BM_MainThread - -void BM_MainThread(benchmark::State& state) { - for (auto _ : state) { - MyBusySpinwait(); - state.SetIterationTime(time_frame_in_sec); - } - state.counters["invtime"] = - benchmark::Counter{1, benchmark::Counter::kIsRate}; -} - -BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1); -BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseRealTime(); -BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseManualTime(); -BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime(); -BENCHMARK(BM_MainThread) - ->Iterations(1) - ->Threads(1) - ->MeasureProcessCPUTime() - ->UseRealTime(); -BENCHMARK(BM_MainThread) - ->Iterations(1) - ->Threads(1) - ->MeasureProcessCPUTime() - ->UseManualTime(); - -BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2); 
-BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseRealTime(); -BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseManualTime(); -BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime(); -BENCHMARK(BM_MainThread) - ->Iterations(1) - ->Threads(2) - ->MeasureProcessCPUTime() - ->UseRealTime(); -BENCHMARK(BM_MainThread) - ->Iterations(1) - ->Threads(2) - ->MeasureProcessCPUTime() - ->UseManualTime(); - -// ========================================================================= // -// BM_WorkerThread - -void BM_WorkerThread(benchmark::State& state) { - for (auto _ : state) { - std::thread Worker(&MyBusySpinwait); - Worker.join(); - state.SetIterationTime(time_frame_in_sec); - } - state.counters["invtime"] = - benchmark::Counter{1, benchmark::Counter::kIsRate}; -} - -BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1); -BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseRealTime(); -BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseManualTime(); -BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime(); -BENCHMARK(BM_WorkerThread) - ->Iterations(1) - ->Threads(1) - ->MeasureProcessCPUTime() - ->UseRealTime(); -BENCHMARK(BM_WorkerThread) - ->Iterations(1) - ->Threads(1) - ->MeasureProcessCPUTime() - ->UseManualTime(); - -BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2); -BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseRealTime(); -BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseManualTime(); -BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime(); -BENCHMARK(BM_WorkerThread) - ->Iterations(1) - ->Threads(2) - ->MeasureProcessCPUTime() - ->UseRealTime(); -BENCHMARK(BM_WorkerThread) - ->Iterations(1) - ->Threads(2) - ->MeasureProcessCPUTime() - ->UseManualTime(); - -// ========================================================================= // -// BM_MainThreadAndWorkerThread - -void BM_MainThreadAndWorkerThread(benchmark::State& state) { - for (auto _ : state) { - std::thread Worker(&MyBusySpinwait); - MyBusySpinwait(); - Worker.join(); - state.SetIterationTime(time_frame_in_sec); - } - state.counters["invtime"] = - benchmark::Counter{1, benchmark::Counter::kIsRate}; -} - -BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1); -BENCHMARK(BM_MainThreadAndWorkerThread) - ->Iterations(1) - ->Threads(1) - ->UseRealTime(); -BENCHMARK(BM_MainThreadAndWorkerThread) - ->Iterations(1) - ->Threads(1) - ->UseManualTime(); -BENCHMARK(BM_MainThreadAndWorkerThread) - ->Iterations(1) - ->Threads(1) - ->MeasureProcessCPUTime(); -BENCHMARK(BM_MainThreadAndWorkerThread) - ->Iterations(1) - ->Threads(1) - ->MeasureProcessCPUTime() - ->UseRealTime(); -BENCHMARK(BM_MainThreadAndWorkerThread) - ->Iterations(1) - ->Threads(1) - ->MeasureProcessCPUTime() - ->UseManualTime(); - -BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2); -BENCHMARK(BM_MainThreadAndWorkerThread) - ->Iterations(1) - ->Threads(2) - ->UseRealTime(); -BENCHMARK(BM_MainThreadAndWorkerThread) - ->Iterations(1) - ->Threads(2) - ->UseManualTime(); -BENCHMARK(BM_MainThreadAndWorkerThread) - ->Iterations(1) - ->Threads(2) - ->MeasureProcessCPUTime(); -BENCHMARK(BM_MainThreadAndWorkerThread) - ->Iterations(1) - ->Threads(2) - ->MeasureProcessCPUTime() - ->UseRealTime(); -BENCHMARK(BM_MainThreadAndWorkerThread) - ->Iterations(1) - ->Threads(2) - ->MeasureProcessCPUTime() - ->UseManualTime(); - -// ========================================================================= // -// ---------------------------- 
TEST CASES END ----------------------------- // -// ========================================================================= // - -int main(int argc, char* argv[]) { RunOutputTests(argc, argv); } diff --git a/benchmarks/thirdparty/benchmark/test/link_main_test.cc b/benchmarks/thirdparty/benchmark/test/link_main_test.cc deleted file mode 100755 index 241ad5c390..0000000000 --- a/benchmarks/thirdparty/benchmark/test/link_main_test.cc +++ /dev/null @@ -1,8 +0,0 @@ -#include "benchmark/benchmark.h" - -void BM_empty(benchmark::State& state) { - for (auto _ : state) { - benchmark::DoNotOptimize(state.iterations()); - } -} -BENCHMARK(BM_empty); diff --git a/benchmarks/thirdparty/benchmark/test/map_test.cc b/benchmarks/thirdparty/benchmark/test/map_test.cc deleted file mode 100755 index dbf7982a36..0000000000 --- a/benchmarks/thirdparty/benchmark/test/map_test.cc +++ /dev/null @@ -1,57 +0,0 @@ -#include "benchmark/benchmark.h" - -#include -#include - -namespace { - -std::map ConstructRandomMap(int size) { - std::map m; - for (int i = 0; i < size; ++i) { - m.insert(std::make_pair(std::rand() % size, std::rand() % size)); - } - return m; -} - -} // namespace - -// Basic version. -static void BM_MapLookup(benchmark::State& state) { - const int size = static_cast(state.range(0)); - std::map m; - for (auto _ : state) { - state.PauseTiming(); - m = ConstructRandomMap(size); - state.ResumeTiming(); - for (int i = 0; i < size; ++i) { - benchmark::DoNotOptimize(m.find(std::rand() % size)); - } - } - state.SetItemsProcessed(state.iterations() * size); -} -BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12); - -// Using fixtures. -class MapFixture : public ::benchmark::Fixture { - public: - void SetUp(const ::benchmark::State& st) { - m = ConstructRandomMap(static_cast(st.range(0))); - } - - void TearDown(const ::benchmark::State&) { m.clear(); } - - std::map m; -}; - -BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) { - const int size = static_cast(state.range(0)); - for (auto _ : state) { - for (int i = 0; i < size; ++i) { - benchmark::DoNotOptimize(m.find(std::rand() % size)); - } - } - state.SetItemsProcessed(state.iterations() * size); -} -BENCHMARK_REGISTER_F(MapFixture, Lookup)->Range(1 << 3, 1 << 12); - -BENCHMARK_MAIN(); diff --git a/benchmarks/thirdparty/benchmark/test/memory_manager_test.cc b/benchmarks/thirdparty/benchmark/test/memory_manager_test.cc deleted file mode 100755 index 90bed16cff..0000000000 --- a/benchmarks/thirdparty/benchmark/test/memory_manager_test.cc +++ /dev/null @@ -1,44 +0,0 @@ -#include - -#include "../src/check.h" -#include "benchmark/benchmark.h" -#include "output_test.h" - -class TestMemoryManager : public benchmark::MemoryManager { - void Start() {} - void Stop(Result* result) { - result->num_allocs = 42; - result->max_bytes_used = 42000; - } -}; - -void BM_empty(benchmark::State& state) { - for (auto _ : state) { - benchmark::DoNotOptimize(state.iterations()); - } -} -BENCHMARK(BM_empty); - -ADD_CASES(TC_ConsoleOut, {{"^BM_empty %console_report$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_empty\",$"}, - {"\"run_name\": \"BM_empty\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"allocs_per_iter\": %float,$", MR_Next}, - {"\"max_bytes_used\": 42000$", MR_Next}, - {"}", 
-ADD_CASES(TC_CSVOut, {{"^\"BM_empty\",%csv_report$"}});
-
-int main(int argc, char* argv[]) {
-  std::unique_ptr<benchmark::MemoryManager> mm(new TestMemoryManager());
-
-  benchmark::RegisterMemoryManager(mm.get());
-  RunOutputTests(argc, argv);
-  benchmark::RegisterMemoryManager(nullptr);
-}
diff --git a/benchmarks/thirdparty/benchmark/test/multiple_ranges_test.cc b/benchmarks/thirdparty/benchmark/test/multiple_ranges_test.cc
deleted file mode 100755
index b25f40eb52..0000000000
--- a/benchmarks/thirdparty/benchmark/test/multiple_ranges_test.cc
+++ /dev/null
@@ -1,96 +0,0 @@
-#include "benchmark/benchmark.h"
-
-#include <cassert>
-#include <iostream>
-#include <set>
-#include <vector>
-
-class MultipleRangesFixture : public ::benchmark::Fixture {
- public:
-  MultipleRangesFixture()
-      : expectedValues({{1, 3, 5},
-                        {1, 3, 8},
-                        {1, 3, 15},
-                        {2, 3, 5},
-                        {2, 3, 8},
-                        {2, 3, 15},
-                        {1, 4, 5},
-                        {1, 4, 8},
-                        {1, 4, 15},
-                        {2, 4, 5},
-                        {2, 4, 8},
-                        {2, 4, 15},
-                        {1, 7, 5},
-                        {1, 7, 8},
-                        {1, 7, 15},
-                        {2, 7, 5},
-                        {2, 7, 8},
-                        {2, 7, 15},
-                        {7, 6, 3}}) {}
-
-  void SetUp(const ::benchmark::State& state) {
-    std::vector<int64_t> ranges = {state.range(0), state.range(1),
-                                   state.range(2)};
-
-    assert(expectedValues.find(ranges) != expectedValues.end());
-
-    actualValues.insert(ranges);
-  }
-
-  // NOTE: This is not TearDown as we want to check after _all_ runs are
-  // complete.
-  virtual ~MultipleRangesFixture() {
-    if (actualValues != expectedValues) {
-      std::cout << "EXPECTED\n";
-      for (auto v : expectedValues) {
-        std::cout << "{";
-        for (int64_t iv : v) {
-          std::cout << iv << ", ";
-        }
-        std::cout << "}\n";
-      }
-      std::cout << "ACTUAL\n";
-      for (auto v : actualValues) {
-        std::cout << "{";
-        for (int64_t iv : v) {
-          std::cout << iv << ", ";
-        }
-        std::cout << "}\n";
-      }
-    }
-  }
-
-  std::set<std::vector<int64_t>> expectedValues;
-  std::set<std::vector<int64_t>> actualValues;
-};
-
-BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) {
-  for (auto _ : state) {
-    int64_t product = state.range(0) * state.range(1) * state.range(2);
-    for (int64_t x = 0; x < product; x++) {
-      benchmark::DoNotOptimize(x);
-    }
-  }
-}
-
-BENCHMARK_REGISTER_F(MultipleRangesFixture, Empty)
-    ->RangeMultiplier(2)
-    ->Ranges({{1, 2}, {3, 7}, {5, 15}})
-    ->Args({7, 6, 3});
-
-void BM_CheckDefaultArgument(benchmark::State& state) {
-  // Test that the 'range()' without an argument is the same as 'range(0)'.
-  assert(state.range() == state.range(0));
-  assert(state.range() != state.range(1));
-  for (auto _ : state) {
-  }
-}
-BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}});
-
-static void BM_MultipleRanges(benchmark::State& st) {
-  for (auto _ : st) {
-  }
-}
-BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}});
-
-BENCHMARK_MAIN();
diff --git a/benchmarks/thirdparty/benchmark/test/options_test.cc b/benchmarks/thirdparty/benchmark/test/options_test.cc
deleted file mode 100755
index 7bfc235465..0000000000
--- a/benchmarks/thirdparty/benchmark/test/options_test.cc
+++ /dev/null
@@ -1,75 +0,0 @@
-#include "benchmark/benchmark.h"
-#include <chrono>
-#include <thread>
-
-#if defined(NDEBUG)
-#undef NDEBUG
-#endif
-#include <cassert>
-
-void BM_basic(benchmark::State& state) {
-  for (auto _ : state) {
-  }
-}
-
-void BM_basic_slow(benchmark::State& state) {
-  std::chrono::milliseconds sleep_duration(state.range(0));
-  for (auto _ : state) {
-    std::this_thread::sleep_for(
-        std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
-  }
-}
-
-BENCHMARK(BM_basic);
-BENCHMARK(BM_basic)->Arg(42);
-BENCHMARK(BM_basic_slow)->Arg(10)->Unit(benchmark::kNanosecond);
-BENCHMARK(BM_basic_slow)->Arg(100)->Unit(benchmark::kMicrosecond);
-BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kMillisecond);
-BENCHMARK(BM_basic)->Range(1, 8);
-BENCHMARK(BM_basic)->RangeMultiplier(2)->Range(1, 8);
-BENCHMARK(BM_basic)->DenseRange(10, 15);
-BENCHMARK(BM_basic)->Args({42, 42});
-BENCHMARK(BM_basic)->Ranges({{64, 512}, {64, 512}});
-BENCHMARK(BM_basic)->MinTime(0.7);
-BENCHMARK(BM_basic)->UseRealTime();
-BENCHMARK(BM_basic)->ThreadRange(2, 4);
-BENCHMARK(BM_basic)->ThreadPerCpu();
-BENCHMARK(BM_basic)->Repetitions(3);
-BENCHMARK(BM_basic)
-    ->RangeMultiplier(std::numeric_limits<int>::max())
-    ->Range(std::numeric_limits<int64_t>::min(),
-            std::numeric_limits<int64_t>::max());
-
-// Negative ranges
-BENCHMARK(BM_basic)->Range(-64, -1);
-BENCHMARK(BM_basic)->RangeMultiplier(4)->Range(-8, 8);
-BENCHMARK(BM_basic)->DenseRange(-2, 2, 1);
-BENCHMARK(BM_basic)->Ranges({{-64, 1}, {-8, -1}});
-
-void CustomArgs(benchmark::internal::Benchmark* b) {
-  for (int i = 0; i < 10; ++i) {
-    b->Arg(i);
-  }
-}
-
-BENCHMARK(BM_basic)->Apply(CustomArgs);
-
-void BM_explicit_iteration_count(benchmark::State& state) {
-  // Test that benchmarks specified with an explicit iteration count are
-  // only run once.
-  static bool invoked_before = false;
-  assert(!invoked_before);
-  invoked_before = true;
-
-  // Test that the requested iteration count is respected.
-  assert(state.max_iterations == 42);
-  size_t actual_iterations = 0;
-  for (auto _ : state)
-    ++actual_iterations;
-  assert(state.iterations() == state.max_iterations);
-  assert(state.iterations() == 42);
-
-}
-BENCHMARK(BM_explicit_iteration_count)->Iterations(42);
-
-BENCHMARK_MAIN();
diff --git a/benchmarks/thirdparty/benchmark/test/output_test.h b/benchmarks/thirdparty/benchmark/test/output_test.h
deleted file mode 100755
index 9385761b21..0000000000
--- a/benchmarks/thirdparty/benchmark/test/output_test.h
+++ /dev/null
@@ -1,213 +0,0 @@
-#ifndef TEST_OUTPUT_TEST_H
-#define TEST_OUTPUT_TEST_H
-
-#undef NDEBUG
-#include <functional>
-#include <initializer_list>
-#include <map>
-#include <memory>
-#include <sstream>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "../src/re.h"
-#include "benchmark/benchmark.h"
-
-#define CONCAT2(x, y) x##y
-#define CONCAT(x, y) CONCAT2(x, y)
-
-#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = ::AddCases(__VA_ARGS__)
-
-#define SET_SUBSTITUTIONS(...) \
-  int CONCAT(dummy, __LINE__) = ::SetSubstitutions(__VA_ARGS__)
-
-enum MatchRules {
-  MR_Default,  // Skip non-matching lines until a match is found.
-  MR_Next,     // Match must occur on the next line.
-  MR_Not  // No line between the current position and the next match matches
-          // the regex
-};
-
-struct TestCase {
-  TestCase(std::string re, int rule = MR_Default);
-
-  std::string regex_str;
-  int match_rule;
-  std::string substituted_regex;
-  std::shared_ptr<benchmark::Regex> regex;
-};
-
-enum TestCaseID {
-  TC_ConsoleOut,
-  TC_ConsoleErr,
-  TC_JSONOut,
-  TC_JSONErr,
-  TC_CSVOut,
-  TC_CSVErr,
-
-  TC_NumID  // PRIVATE
-};
-
-// Add a list of test cases to be run against the output specified by
-// 'ID'
-int AddCases(TestCaseID ID, std::initializer_list<TestCase> il);
-
-// Add or set a list of substitutions to be performed on constructed regex's
-// See 'output_test_helper.cc' for a list of default substitutions.
-int SetSubstitutions(
-    std::initializer_list<std::pair<std::string, std::string>> il);
-
-// Run all output tests.
-void RunOutputTests(int argc, char* argv[]);
-
-// Count the number of 'pat' substrings in the 'haystack' string.
-int SubstrCnt(const std::string& haystack, const std::string& pat);
-
-// Run registered benchmarks with file reporter enabled, and return the content
-// outputted by the file reporter.
-std::string GetFileReporterOutput(int argc, char* argv[]);
-
-// ========================================================================= //
-// ------------------------- Results checking ------------------------------ //
-// ========================================================================= //
-
-// Call this macro to register a benchmark for checking its results. This
-// should be all that's needed. It subscribes a function to check the (CSV)
-// results of a benchmark. This is done only after verifying that the output
-// strings are really as expected.
-// bm_name_pattern: a name or a regex pattern which will be matched against
-//                  all the benchmark names. Matching benchmarks
-//                  will be the subject of a call to checker_function
-// checker_function: should be of type ResultsCheckFn (see below)
-#define CHECK_BENCHMARK_RESULTS(bm_name_pattern, checker_function) \
-  size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function)
-
-struct Results;
-typedef std::function<void(Results const&)> ResultsCheckFn;
-
-size_t AddChecker(const char* bm_name_pattern, ResultsCheckFn fn);
-
-// Class holding the results of a benchmark.
-// It is passed in calls to checker functions.
-struct Results {
-  // the benchmark name
-  std::string name;
-  // the benchmark fields
-  std::map<std::string, std::string> values;
-
-  Results(const std::string& n) : name(n) {}
-
-  int NumThreads() const;
-
-  double NumIterations() const;
-
-  typedef enum { kCpuTime, kRealTime } BenchmarkTime;
-
-  // get cpu_time or real_time in seconds
-  double GetTime(BenchmarkTime which) const;
-
-  // get the real_time duration of the benchmark in seconds.
-  // it is better to use fuzzy float checks for this, as the float
-  // ASCII formatting is lossy.
-  double DurationRealTime() const {
-    return NumIterations() * GetTime(kRealTime);
-  }
-  // get the cpu_time duration of the benchmark in seconds
-  double DurationCPUTime() const {
-    return NumIterations() * GetTime(kCpuTime);
-  }
-
-  // get the string for a result by name, or nullptr if the name
-  // is not found
-  const std::string* Get(const char* entry_name) const {
-    auto it = values.find(entry_name);
-    if (it == values.end()) return nullptr;
-    return &it->second;
-  }
-
-  // get a result by name, parsed as a specific type.
-  // NOTE: for counters, use GetCounterAs instead.
-  template <class T>
-  T GetAs(const char* entry_name) const;
-
-  // counters are written as doubles, so they have to be read first
-  // as a double, and only then converted to the asked type.
-  template <class T>
-  T GetCounterAs(const char* entry_name) const {
-    double dval = GetAs<double>(entry_name);
-    T tval = static_cast<T>(dval);
-    return tval;
-  }
-};
-
-template <class T>
-T Results::GetAs(const char* entry_name) const {
-  auto* sv = Get(entry_name);
-  CHECK(sv != nullptr && !sv->empty());
-  std::stringstream ss;
-  ss << *sv;
-  T out;
-  ss >> out;
-  CHECK(!ss.fail());
-  return out;
-}
-
-//----------------------------------
-// Macros to help in result checking. Do not use them with arguments causing
-// side-effects.
-
-// clang-format off
-
-#define _CHECK_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value) \
-    CONCAT(CHECK_, relationship)                                        \
-    (entry.getfn< var_type >(var_name), (value)) << "\n"                \
-    << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n"     \
-    << __FILE__ << ":" << __LINE__ << ": "                              \
-    << "expected (" << #var_type << ")" << (var_name)                   \
-    << "=" << (entry).getfn< var_type >(var_name)                       \
-    << " to be " #relationship " to " << (value) << "\n"
-
-// check with tolerance. eps_factor is the tolerance window, which is
-// interpreted relative to value (eg, 0.1 means 10% of value).
-#define _CHECK_FLOAT_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value, eps_factor) \
-    CONCAT(CHECK_FLOAT_, relationship)                                  \
-    (entry.getfn< var_type >(var_name), (value), (eps_factor) * (value)) << "\n" \
-    << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n"     \
-    << __FILE__ << ":" << __LINE__ << ": "                              \
-    << "expected (" << #var_type << ")" << (var_name)                   \
-    << "=" << (entry).getfn< var_type >(var_name)                       \
-    << " to be " #relationship " to " << (value) << "\n"                \
-    << __FILE__ << ":" << __LINE__ << ": "                              \
-    << "with tolerance of " << (eps_factor) * (value)                   \
-    << " (" << (eps_factor)*100. << "%), "                              \
-    << "but delta was " << ((entry).getfn< var_type >(var_name) - (value)) \
-    << " (" << (((entry).getfn< var_type >(var_name) - (value))         \
-               /                                                        \
-               ((value) > 1.e-5 || value < -1.e-5 ? value : 1.e-5)*100.) \
\ - << "%)" - -#define CHECK_RESULT_VALUE(entry, var_type, var_name, relationship, value) \ - _CHECK_RESULT_VALUE(entry, GetAs, var_type, var_name, relationship, value) - -#define CHECK_COUNTER_VALUE(entry, var_type, var_name, relationship, value) \ - _CHECK_RESULT_VALUE(entry, GetCounterAs, var_type, var_name, relationship, value) - -#define CHECK_FLOAT_RESULT_VALUE(entry, var_name, relationship, value, eps_factor) \ - _CHECK_FLOAT_RESULT_VALUE(entry, GetAs, double, var_name, relationship, value, eps_factor) - -#define CHECK_FLOAT_COUNTER_VALUE(entry, var_name, relationship, value, eps_factor) \ - _CHECK_FLOAT_RESULT_VALUE(entry, GetCounterAs, double, var_name, relationship, value, eps_factor) - -// clang-format on - -// ========================================================================= // -// --------------------------- Misc Utilities ------------------------------ // -// ========================================================================= // - -namespace { - -const char* const dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"; - -} // end namespace - -#endif // TEST_OUTPUT_TEST_H diff --git a/benchmarks/thirdparty/benchmark/test/output_test_helper.cc b/benchmarks/thirdparty/benchmark/test/output_test_helper.cc deleted file mode 100755 index f99b3a8261..0000000000 --- a/benchmarks/thirdparty/benchmark/test/output_test_helper.cc +++ /dev/null @@ -1,515 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../src/benchmark_api_internal.h" -#include "../src/check.h" // NOTE: check.h is for internal use only! -#include "../src/re.h" // NOTE: re.h is for internal use only -#include "output_test.h" - -// ========================================================================= // -// ------------------------------ Internals -------------------------------- // -// ========================================================================= // -namespace internal { -namespace { - -using TestCaseList = std::vector; - -// Use a vector because the order elements are added matters during iteration. -// std::map/unordered_map don't guarantee that. -// For example: -// SetSubstitutions({{"%HelloWorld", "Hello"}, {"%Hello", "Hi"}}); -// Substitute("%HelloWorld") // Always expands to Hello. -using SubMap = std::vector>; - -TestCaseList& GetTestCaseList(TestCaseID ID) { - // Uses function-local statics to ensure initialization occurs - // before first use. - static TestCaseList lists[TC_NumID]; - return lists[ID]; -} - -SubMap& GetSubstitutions() { - // Don't use 'dec_re' from header because it may not yet be initialized. 
-  // clang-format off
-  static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
-  static std::string time_re = "([0-9]+[.])?[0-9]+";
-  static SubMap map = {
-      {"%float", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"},
-      // human-readable float
-      {"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kMGTPEZYmunpfazy]?"},
-      {"%int", "[ ]*[0-9]+"},
-      {" %s ", "[ ]+"},
-      {"%time", "[ ]*" + time_re + "[ ]+ns"},
-      {"%console_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns [ ]*[0-9]+"},
-      {"%console_time_only_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns"},
-      {"%console_us_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us [ ]*[0-9]+"},
-      {"%console_us_time_only_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us"},
-      {"%csv_header",
-       "name,iterations,real_time,cpu_time,time_unit,bytes_per_second,"
-       "items_per_second,label,error_occurred,error_message"},
-      {"%csv_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,,,"},
-      {"%csv_us_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",us,,,,,"},
-      {"%csv_bytes_report",
-       "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + ",,,,"},
-      {"%csv_items_report",
-       "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,," + safe_dec_re + ",,,"},
-      {"%csv_bytes_items_report",
-       "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re +
-           "," + safe_dec_re + ",,,"},
-      {"%csv_label_report_begin", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,"},
-      {"%csv_label_report_end", ",,"}};
-  // clang-format on
-  return map;
-}
-
-std::string PerformSubstitutions(std::string source) {
-  SubMap const& subs = GetSubstitutions();
-  using SizeT = std::string::size_type;
-  for (auto const& KV : subs) {
-    SizeT pos;
-    SizeT next_start = 0;
-    while ((pos = source.find(KV.first, next_start)) != std::string::npos) {
-      next_start = pos + KV.second.size();
-      source.replace(pos, KV.first.size(), KV.second);
-    }
-  }
-  return source;
-}
-
-void CheckCase(std::stringstream& remaining_output, TestCase const& TC,
-               TestCaseList const& not_checks) {
-  std::string first_line;
-  bool on_first = true;
-  std::string line;
-  while (remaining_output.eof() == false) {
-    CHECK(remaining_output.good());
-    std::getline(remaining_output, line);
-    if (on_first) {
-      first_line = line;
-      on_first = false;
-    }
-    for (const auto& NC : not_checks) {
-      CHECK(!NC.regex->Match(line))
-          << "Unexpected match for line \"" << line << "\" for MR_Not regex \""
-          << NC.regex_str << "\""
-          << "\n    actual regex string \"" << TC.substituted_regex << "\""
-          << "\n    started matching near: " << first_line;
-    }
-    if (TC.regex->Match(line)) return;
-    CHECK(TC.match_rule != MR_Next)
-        << "Expected line \"" << line << "\" to match regex \"" << TC.regex_str
-        << "\""
-        << "\n    actual regex string \"" << TC.substituted_regex << "\""
-        << "\n    started matching near: " << first_line;
-  }
-  CHECK(remaining_output.eof() == false)
-      << "End of output reached before match for regex \"" << TC.regex_str
-      << "\" was found"
-      << "\n    actual regex string \"" << TC.substituted_regex << "\""
-      << "\n    started matching near: " << first_line;
-}
-
-void CheckCases(TestCaseList const& checks, std::stringstream& output) {
-  std::vector<TestCase> not_checks;
-  for (size_t i = 0; i < checks.size(); ++i) {
-    const auto& TC = checks[i];
-    if (TC.match_rule == MR_Not) {
-      not_checks.push_back(TC);
-      continue;
-    }
-    CheckCase(output, TC, not_checks);
-    not_checks.clear();
-  }
-}
-
-class TestReporter : public benchmark::BenchmarkReporter {
- public:
-  TestReporter(std::vector<benchmark::BenchmarkReporter*> reps)
-      : reporters_(reps) {}
-
-  virtual bool ReportContext(const Context& context) {
-    bool last_ret = false;
-    bool first = true;
-    for (auto rep : reporters_) {
-      bool new_ret = rep->ReportContext(context);
-      CHECK(first || new_ret == last_ret)
-          << "Reports return different values for ReportContext";
-      first = false;
-      last_ret = new_ret;
-    }
-    (void)first;
-    return last_ret;
-  }
-
-  void ReportRuns(const std::vector<Run>& report) {
-    for (auto rep : reporters_) rep->ReportRuns(report);
-  }
-  void Finalize() {
-    for (auto rep : reporters_) rep->Finalize();
-  }
-
- private:
-  std::vector<benchmark::BenchmarkReporter*> reporters_;
-};
-}  // namespace
-
-}  // end namespace internal
-
-// ========================================================================= //
-// -------------------------- Results checking ----------------------------- //
-// ========================================================================= //
-
-namespace internal {
-
-// Utility class to manage subscribers for checking benchmark results.
-// It works by parsing the CSV output to read the results.
-class ResultsChecker {
- public:
-  struct PatternAndFn : public TestCase {  // reusing TestCase for its regexes
-    PatternAndFn(const std::string& rx, ResultsCheckFn fn_)
-        : TestCase(rx), fn(fn_) {}
-    ResultsCheckFn fn;
-  };
-
-  std::vector<PatternAndFn> check_patterns;
-  std::vector<Results> results;
-  std::vector<std::string> field_names;
-
-  void Add(const std::string& entry_pattern, ResultsCheckFn fn);
-
-  void CheckResults(std::stringstream& output);
-
- private:
-  void SetHeader_(const std::string& csv_header);
-  void SetValues_(const std::string& entry_csv_line);
-
-  std::vector<std::string> SplitCsv_(const std::string& line);
-};
-
-// store the static ResultsChecker in a function to prevent initialization
-// order problems
-ResultsChecker& GetResultsChecker() {
-  static ResultsChecker rc;
-  return rc;
-}
-
-// add a results checker for a benchmark
-void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) {
-  check_patterns.emplace_back(entry_pattern, fn);
-}
-
-// check the results of all subscribed benchmarks
-void ResultsChecker::CheckResults(std::stringstream& output) {
-  // first reset the stream to the start
-  {
-    auto start = std::stringstream::pos_type(0);
-    // clear before calling tellg()
-    output.clear();
-    // seek to zero only when needed
-    if (output.tellg() > start) output.seekg(start);
-    // and just in case
-    output.clear();
-  }
-  // now go over every line and publish it to the ResultsChecker
-  std::string line;
-  bool on_first = true;
-  while (output.eof() == false) {
-    CHECK(output.good());
-    std::getline(output, line);
-    if (on_first) {
-      SetHeader_(line);  // this is important
-      on_first = false;
-      continue;
-    }
-    SetValues_(line);
-  }
-  // finally we can call the subscribed check functions
-  for (const auto& p : check_patterns) {
-    VLOG(2) << "--------------------------------\n";
-    VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n";
-    for (const auto& r : results) {
-      if (!p.regex->Match(r.name)) {
-        VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n";
-        continue;
-      } else {
-        VLOG(2) << p.regex_str << " is matched by " << r.name << "\n";
-      }
-      VLOG(1) << "Checking results of " << r.name << ": ... \n";
\n"; - p.fn(r); - VLOG(1) << "Checking results of " << r.name << ": OK.\n"; - } - } -} - -// prepare for the names in this header -void ResultsChecker::SetHeader_(const std::string& csv_header) { - field_names = SplitCsv_(csv_header); -} - -// set the values for a benchmark -void ResultsChecker::SetValues_(const std::string& entry_csv_line) { - if (entry_csv_line.empty()) return; // some lines are empty - CHECK(!field_names.empty()); - auto vals = SplitCsv_(entry_csv_line); - CHECK_EQ(vals.size(), field_names.size()); - results.emplace_back(vals[0]); // vals[0] is the benchmark name - auto& entry = results.back(); - for (size_t i = 1, e = vals.size(); i < e; ++i) { - entry.values[field_names[i]] = vals[i]; - } -} - -// a quick'n'dirty csv splitter (eliminating quotes) -std::vector ResultsChecker::SplitCsv_(const std::string& line) { - std::vector out; - if (line.empty()) return out; - if (!field_names.empty()) out.reserve(field_names.size()); - size_t prev = 0, pos = line.find_first_of(','), curr = pos; - while (pos != line.npos) { - CHECK(curr > 0); - if (line[prev] == '"') ++prev; - if (line[curr - 1] == '"') --curr; - out.push_back(line.substr(prev, curr - prev)); - prev = pos + 1; - pos = line.find_first_of(',', pos + 1); - curr = pos; - } - curr = line.size(); - if (line[prev] == '"') ++prev; - if (line[curr - 1] == '"') --curr; - out.push_back(line.substr(prev, curr - prev)); - return out; -} - -} // end namespace internal - -size_t AddChecker(const char* bm_name, ResultsCheckFn fn) { - auto& rc = internal::GetResultsChecker(); - rc.Add(bm_name, fn); - return rc.results.size(); -} - -int Results::NumThreads() const { - auto pos = name.find("/threads:"); - if (pos == name.npos) return 1; - auto end = name.find('/', pos + 9); - std::stringstream ss; - ss << name.substr(pos + 9, end); - int num = 1; - ss >> num; - CHECK(!ss.fail()); - return num; -} - -double Results::NumIterations() const { - return GetAs("iterations"); -} - -double Results::GetTime(BenchmarkTime which) const { - CHECK(which == kCpuTime || which == kRealTime); - const char* which_str = which == kCpuTime ? 
"cpu_time" : "real_time"; - double val = GetAs(which_str); - auto unit = Get("time_unit"); - CHECK(unit); - if (*unit == "ns") { - return val * 1.e-9; - } else if (*unit == "us") { - return val * 1.e-6; - } else if (*unit == "ms") { - return val * 1.e-3; - } else if (*unit == "s") { - return val; - } else { - CHECK(1 == 0) << "unknown time unit: " << *unit; - return 0; - } -} - -// ========================================================================= // -// -------------------------- Public API Definitions------------------------ // -// ========================================================================= // - -TestCase::TestCase(std::string re, int rule) - : regex_str(std::move(re)), - match_rule(rule), - substituted_regex(internal::PerformSubstitutions(regex_str)), - regex(std::make_shared()) { - std::string err_str; - regex->Init(substituted_regex, &err_str); - CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex - << "\"" - << "\n originally \"" << regex_str << "\"" - << "\n got error: " << err_str; -} - -int AddCases(TestCaseID ID, std::initializer_list il) { - auto& L = internal::GetTestCaseList(ID); - L.insert(L.end(), il); - return 0; -} - -int SetSubstitutions( - std::initializer_list> il) { - auto& subs = internal::GetSubstitutions(); - for (auto KV : il) { - bool exists = false; - KV.second = internal::PerformSubstitutions(KV.second); - for (auto& EKV : subs) { - if (EKV.first == KV.first) { - EKV.second = std::move(KV.second); - exists = true; - break; - } - } - if (!exists) subs.push_back(std::move(KV)); - } - return 0; -} - -// Disable deprecated warnings temporarily because we need to reference -// CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" -#endif -void RunOutputTests(int argc, char* argv[]) { - using internal::GetTestCaseList; - benchmark::Initialize(&argc, argv); - auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/ true); - benchmark::ConsoleReporter CR(options); - benchmark::JSONReporter JR; - benchmark::CSVReporter CSVR; - struct ReporterTest { - const char* name; - std::vector& output_cases; - std::vector& error_cases; - benchmark::BenchmarkReporter& reporter; - std::stringstream out_stream; - std::stringstream err_stream; - - ReporterTest(const char* n, std::vector& out_tc, - std::vector& err_tc, - benchmark::BenchmarkReporter& br) - : name(n), output_cases(out_tc), error_cases(err_tc), reporter(br) { - reporter.SetOutputStream(&out_stream); - reporter.SetErrorStream(&err_stream); - } - } TestCases[] = { - {"ConsoleReporter", GetTestCaseList(TC_ConsoleOut), - GetTestCaseList(TC_ConsoleErr), CR}, - {"JSONReporter", GetTestCaseList(TC_JSONOut), GetTestCaseList(TC_JSONErr), - JR}, - {"CSVReporter", GetTestCaseList(TC_CSVOut), GetTestCaseList(TC_CSVErr), - CSVR}, - }; - - // Create the test reporter and run the benchmarks. 
- std::cout << "Running benchmarks...\n"; - internal::TestReporter test_rep({&CR, &JR, &CSVR}); - benchmark::RunSpecifiedBenchmarks(&test_rep); - - for (auto& rep_test : TestCases) { - std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n"; - std::string banner(msg.size() - 1, '-'); - std::cout << banner << msg << banner << "\n"; - - std::cerr << rep_test.err_stream.str(); - std::cout << rep_test.out_stream.str(); - - internal::CheckCases(rep_test.error_cases, rep_test.err_stream); - internal::CheckCases(rep_test.output_cases, rep_test.out_stream); - - std::cout << "\n"; - } - - // now that we know the output is as expected, we can dispatch - // the checks to subscribees. - auto& csv = TestCases[2]; - // would use == but gcc spits a warning - CHECK(std::strcmp(csv.name, "CSVReporter") == 0); - internal::GetResultsChecker().CheckResults(csv.out_stream); -} - -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif - -int SubstrCnt(const std::string& haystack, const std::string& pat) { - if (pat.length() == 0) return 0; - int count = 0; - for (size_t offset = haystack.find(pat); offset != std::string::npos; - offset = haystack.find(pat, offset + pat.length())) - ++count; - return count; -} - -static char ToHex(int ch) { - return ch < 10 ? static_cast('0' + ch) - : static_cast('a' + (ch - 10)); -} - -static char RandomHexChar() { - static std::mt19937 rd{std::random_device{}()}; - static std::uniform_int_distribution mrand{0, 15}; - return ToHex(mrand(rd)); -} - -static std::string GetRandomFileName() { - std::string model = "test.%%%%%%"; - for (auto & ch : model) { - if (ch == '%') - ch = RandomHexChar(); - } - return model; -} - -static bool FileExists(std::string const& name) { - std::ifstream in(name.c_str()); - return in.good(); -} - -static std::string GetTempFileName() { - // This function attempts to avoid race conditions where two tests - // create the same file at the same time. However, it still introduces races - // similar to tmpnam. - int retries = 3; - while (--retries) { - std::string name = GetRandomFileName(); - if (!FileExists(name)) - return name; - } - std::cerr << "Failed to create unique temporary file name" << std::endl; - std::abort(); -} - -std::string GetFileReporterOutput(int argc, char* argv[]) { - std::vector new_argv(argv, argv + argc); - assert(static_cast(argc) == new_argv.size()); - - std::string tmp_file_name = GetTempFileName(); - std::cout << "Will be using this as the tmp file: " << tmp_file_name << '\n'; - - std::string tmp = "--benchmark_out="; - tmp += tmp_file_name; - new_argv.emplace_back(const_cast(tmp.c_str())); - - argc = int(new_argv.size()); - - benchmark::Initialize(&argc, new_argv.data()); - benchmark::RunSpecifiedBenchmarks(); - - // Read the output back from the file, and delete the file. - std::ifstream tmp_stream(tmp_file_name); - std::string output = std::string((std::istreambuf_iterator(tmp_stream)), - std::istreambuf_iterator()); - std::remove(tmp_file_name.c_str()); - - return output; -} diff --git a/benchmarks/thirdparty/benchmark/test/register_benchmark_test.cc b/benchmarks/thirdparty/benchmark/test/register_benchmark_test.cc deleted file mode 100755 index 3ac5b21fb3..0000000000 --- a/benchmarks/thirdparty/benchmark/test/register_benchmark_test.cc +++ /dev/null @@ -1,184 +0,0 @@ - -#undef NDEBUG -#include -#include - -#include "../src/check.h" // NOTE: check.h is for internal use only! 
-#include "benchmark/benchmark.h" - -namespace { - -class TestReporter : public benchmark::ConsoleReporter { - public: - virtual void ReportRuns(const std::vector& report) { - all_runs_.insert(all_runs_.end(), begin(report), end(report)); - ConsoleReporter::ReportRuns(report); - } - - std::vector all_runs_; -}; - -struct TestCase { - std::string name; - const char* label; - // Note: not explicit as we rely on it being converted through ADD_CASES. - TestCase(const char* xname) : TestCase(xname, nullptr) {} - TestCase(const char* xname, const char* xlabel) - : name(xname), label(xlabel) {} - - typedef benchmark::BenchmarkReporter::Run Run; - - void CheckRun(Run const& run) const { - // clang-format off - CHECK(name == run.benchmark_name()) << "expected " << name << " got " - << run.benchmark_name(); - if (label) { - CHECK(run.report_label == label) << "expected " << label << " got " - << run.report_label; - } else { - CHECK(run.report_label == ""); - } - // clang-format on - } -}; - -std::vector ExpectedResults; - -int AddCases(std::initializer_list const& v) { - for (auto N : v) { - ExpectedResults.push_back(N); - } - return 0; -} - -#define CONCAT(x, y) CONCAT2(x, y) -#define CONCAT2(x, y) x##y -#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = AddCases({__VA_ARGS__}) - -} // end namespace - -typedef benchmark::internal::Benchmark* ReturnVal; - -//----------------------------------------------------------------------------// -// Test RegisterBenchmark with no additional arguments -//----------------------------------------------------------------------------// -void BM_function(benchmark::State& state) { - for (auto _ : state) { - } -} -BENCHMARK(BM_function); -ReturnVal dummy = benchmark::RegisterBenchmark( - "BM_function_manual_registration", BM_function); -ADD_CASES({"BM_function"}, {"BM_function_manual_registration"}); - -//----------------------------------------------------------------------------// -// Test RegisterBenchmark with additional arguments -// Note: GCC <= 4.8 do not support this form of RegisterBenchmark because they -// reject the variadic pack expansion of lambda captures. 
-//----------------------------------------------------------------------------//
-#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
-
-void BM_extra_args(benchmark::State& st, const char* label) {
-  for (auto _ : st) {
-  }
-  st.SetLabel(label);
-}
-int RegisterFromFunction() {
-  std::pair<const char*, const char*> cases[] = {
-      {"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}};
-  for (auto const& c : cases)
-    benchmark::RegisterBenchmark(c.first, &BM_extra_args, c.second);
-  return 0;
-}
-int dummy2 = RegisterFromFunction();
-ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"});
-
-#endif  // BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
-
-//----------------------------------------------------------------------------//
-// Test RegisterBenchmark with different callable types
-//----------------------------------------------------------------------------//
-
-struct CustomFixture {
-  void operator()(benchmark::State& st) {
-    for (auto _ : st) {
-    }
-  }
-};
-
-void TestRegistrationAtRuntime() {
-#ifdef BENCHMARK_HAS_CXX11
-  {
-    CustomFixture fx;
-    benchmark::RegisterBenchmark("custom_fixture", fx);
-    AddCases({"custom_fixture"});
-  }
-#endif
-#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
-  {
-    const char* x = "42";
-    auto capturing_lam = [=](benchmark::State& st) {
-      for (auto _ : st) {
-      }
-      st.SetLabel(x);
-    };
-    benchmark::RegisterBenchmark("lambda_benchmark", capturing_lam);
-    AddCases({{"lambda_benchmark", x}});
-  }
-#endif
-}
-
-// Test that all benchmarks, registered either during static init or at
-// runtime, are run and the results are passed to the reporter.
-void RunTestOne() {
-  TestRegistrationAtRuntime();
-
-  TestReporter test_reporter;
-  benchmark::RunSpecifiedBenchmarks(&test_reporter);
-
-  typedef benchmark::BenchmarkReporter::Run Run;
-  auto EB = ExpectedResults.begin();
-
-  for (Run const& run : test_reporter.all_runs_) {
-    assert(EB != ExpectedResults.end());
-    EB->CheckRun(run);
-    ++EB;
-  }
-  assert(EB == ExpectedResults.end());
-}
-
-// Test that ClearRegisteredBenchmarks() clears all previously registered
-// benchmarks.
-// Also test that new benchmarks can be registered and ran afterwards.
-void RunTestTwo() {
-  assert(ExpectedResults.size() != 0 &&
-         "must have at least one registered benchmark");
-  ExpectedResults.clear();
-  benchmark::ClearRegisteredBenchmarks();
-
-  TestReporter test_reporter;
-  size_t num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
-  assert(num_ran == 0);
-  assert(test_reporter.all_runs_.begin() == test_reporter.all_runs_.end());
-
-  TestRegistrationAtRuntime();
-  num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
-  assert(num_ran == ExpectedResults.size());
-
-  typedef benchmark::BenchmarkReporter::Run Run;
-  auto EB = ExpectedResults.begin();
-
-  for (Run const& run : test_reporter.all_runs_) {
-    assert(EB != ExpectedResults.end());
-    EB->CheckRun(run);
-    ++EB;
-  }
-  assert(EB == ExpectedResults.end());
-}
-
-int main(int argc, char* argv[]) {
-  benchmark::Initialize(&argc, argv);
-
-  RunTestOne();
-  RunTestTwo();
-}
diff --git a/benchmarks/thirdparty/benchmark/test/report_aggregates_only_test.cc b/benchmarks/thirdparty/benchmark/test/report_aggregates_only_test.cc
deleted file mode 100755
index 9646b9be53..0000000000
--- a/benchmarks/thirdparty/benchmark/test/report_aggregates_only_test.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-
-#undef NDEBUG
-#include <cstdio>
-#include <string>
-
-#include "benchmark/benchmark.h"
-#include "output_test.h"
-
-// Ok this test is super ugly. We want to check what happens with the file
-// reporter in the presence of ReportAggregatesOnly().
-// We do not care about console output, the normal tests check that already.
-
-void BM_SummaryRepeat(benchmark::State& state) {
-  for (auto _ : state) {
-  }
-}
-BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
-
-int main(int argc, char* argv[]) {
-  const std::string output = GetFileReporterOutput(argc, argv);
-
-  if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 3 ||
-      SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
-      SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") !=
-          1 ||
-      SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") !=
-          1) {
-    std::cout << "Precondition mismatch. Expected to only find three "
-                 "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
-                 "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
-                 "\"name\": \"BM_SummaryRepeat/repeats:3_median\", "
-                 "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire "
-                 "output:\n";
-    std::cout << output;
-    return 1;
-  }
-
-  return 0;
-}
diff --git a/benchmarks/thirdparty/benchmark/test/reporter_output_test.cc b/benchmarks/thirdparty/benchmark/test/reporter_output_test.cc
deleted file mode 100755
index bcce007831..0000000000
--- a/benchmarks/thirdparty/benchmark/test/reporter_output_test.cc
+++ /dev/null
@@ -1,747 +0,0 @@
-
-#undef NDEBUG
-#include <utility>
-
-#include "benchmark/benchmark.h"
-#include "output_test.h"
-
-// ========================================================================= //
-// ---------------------- Testing Prologue Output -------------------------- //
-// ========================================================================= //
-
-ADD_CASES(TC_ConsoleOut, {{"^[-]+$", MR_Next},
-                          {"^Benchmark %s Time %s CPU %s Iterations$", MR_Next},
-                          {"^[-]+$", MR_Next}});
-static int AddContextCases() {
-  AddCases(TC_ConsoleErr,
-           {
-               {"^%int-%int-%intT%int:%int:%int[-+]%int:%int$", MR_Default},
-               {"Running .*/reporter_output_test(\\.exe)?$", MR_Next},
-               {"Run on \\(%int X %float MHz CPU s?\\)", MR_Next},
-           });
-  AddCases(TC_JSONOut,
-           {{"^\\{", MR_Default},
-            {"\"context\":", MR_Next},
-            {"\"date\": \"", MR_Next},
-            {"\"host_name\":", MR_Next},
-            {"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",",
-             MR_Next},
-            {"\"num_cpus\": %int,$", MR_Next},
-            {"\"mhz_per_cpu\": %float,$", MR_Next},
-            {"\"caches\": \\[$", MR_Default}});
-  auto const& Info = benchmark::CPUInfo::Get();
-  auto const& Caches = Info.caches;
-  if (!Caches.empty()) {
-    AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}});
-  }
-  for (size_t I = 0; I < Caches.size(); ++I) {
-    std::string num_caches_str =
-        Caches[I].num_sharing != 0 ? " \\(x%int\\)$" : "$";
" \\(x%int\\)$" : "$"; - AddCases(TC_ConsoleErr, - {{"L%int (Data|Instruction|Unified) %int KiB" + num_caches_str, - MR_Next}}); - AddCases(TC_JSONOut, {{"\\{$", MR_Next}, - {"\"type\": \"", MR_Next}, - {"\"level\": %int,$", MR_Next}, - {"\"size\": %int,$", MR_Next}, - {"\"num_sharing\": %int$", MR_Next}, - {"}[,]{0,1}$", MR_Next}}); - } - AddCases(TC_JSONOut, {{"],$"}}); - auto const& LoadAvg = Info.load_avg; - if (!LoadAvg.empty()) { - AddCases(TC_ConsoleErr, - {{"Load Average: (%float, ){0,2}%float$", MR_Next}}); - } - AddCases(TC_JSONOut, {{"\"load_avg\": \\[(%float,?){0,3}],$", MR_Next}}); - return 0; -} -int dummy_register = AddContextCases(); -ADD_CASES(TC_CSVOut, {{"%csv_header"}}); - -// ========================================================================= // -// ------------------------ Testing Basic Output --------------------------- // -// ========================================================================= // - -void BM_basic(benchmark::State& state) { - for (auto _ : state) { - } -} -BENCHMARK(BM_basic); - -ADD_CASES(TC_ConsoleOut, {{"^BM_basic %console_report$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_basic\",$"}, - {"\"run_name\": \"BM_basic\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\"$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}}); - -// ========================================================================= // -// ------------------------ Testing Bytes per Second Output ---------------- // -// ========================================================================= // - -void BM_bytes_per_second(benchmark::State& state) { - for (auto _ : state) { - // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); - } - state.SetBytesProcessed(1); -} -BENCHMARK(BM_bytes_per_second); - -ADD_CASES(TC_ConsoleOut, {{"^BM_bytes_per_second %console_report " - "bytes_per_second=%float[kM]{0,1}/s$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"}, - {"\"run_name\": \"BM_bytes_per_second\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bytes_per_second\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}}); - -// ========================================================================= // -// ------------------------ Testing Items per Second Output ---------------- // -// ========================================================================= // - -void BM_items_per_second(benchmark::State& state) { - for (auto _ : state) { - // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); - } - state.SetItemsProcessed(1); -} -BENCHMARK(BM_items_per_second); - -ADD_CASES(TC_ConsoleOut, {{"^BM_items_per_second %console_report " - "items_per_second=%float[kM]{0,1}/s$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"}, - {"\"run_name\": \"BM_items_per_second\",$", MR_Next}, - 
{"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"items_per_second\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}}); - -// ========================================================================= // -// ------------------------ Testing Label Output --------------------------- // -// ========================================================================= // - -void BM_label(benchmark::State& state) { - for (auto _ : state) { - } - state.SetLabel("some label"); -} -BENCHMARK(BM_label); - -ADD_CASES(TC_ConsoleOut, {{"^BM_label %console_report some label$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"}, - {"\"run_name\": \"BM_label\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"label\": \"some label\"$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some " - "label\"%csv_label_report_end$"}}); - -// ========================================================================= // -// ------------------------ Testing Error Output --------------------------- // -// ========================================================================= // - -void BM_error(benchmark::State& state) { - state.SkipWithError("message"); - for (auto _ : state) { - } -} -BENCHMARK(BM_error); -ADD_CASES(TC_ConsoleOut, {{"^BM_error[ ]+ERROR OCCURRED: 'message'$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_error\",$"}, - {"\"run_name\": \"BM_error\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"error_occurred\": true,$", MR_Next}, - {"\"error_message\": \"message\",$", MR_Next}}); - -ADD_CASES(TC_CSVOut, {{"^\"BM_error\",,,,,,,,true,\"message\"$"}}); - -// ========================================================================= // -// ------------------------ Testing No Arg Name Output ----------------------- -// // -// ========================================================================= // - -void BM_no_arg_name(benchmark::State& state) { - for (auto _ : state) { - } -} -BENCHMARK(BM_no_arg_name)->Arg(3); -ADD_CASES(TC_ConsoleOut, {{"^BM_no_arg_name/3 %console_report$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_no_arg_name/3\",$"}, - {"\"run_name\": \"BM_no_arg_name/3\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}}); - -// ========================================================================= // -// ------------------------ Testing Arg Name Output ----------------------- // -// ========================================================================= // - -void BM_arg_name(benchmark::State& state) { - for (auto _ : state) { - } -} -BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3); -ADD_CASES(TC_ConsoleOut, 
{{"^BM_arg_name/first:3 %console_report$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_name/first:3\",$"}, - {"\"run_name\": \"BM_arg_name/first:3\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}}); - -// ========================================================================= // -// ------------------------ Testing Arg Names Output ----------------------- // -// ========================================================================= // - -void BM_arg_names(benchmark::State& state) { - for (auto _ : state) { - } -} -BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"}); -ADD_CASES(TC_ConsoleOut, - {{"^BM_arg_names/first:2/5/third:4 %console_report$"}}); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"}, - {"\"run_name\": \"BM_arg_names/first:2/5/third:4\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}}); - -// ========================================================================= // -// ------------------------ Testing Big Args Output ------------------------ // -// ========================================================================= // - -void BM_BigArgs(benchmark::State& state) { - for (auto _ : state) { - } -} -BENCHMARK(BM_BigArgs)->RangeMultiplier(2)->Range(1U << 30U, 1U << 31U); -ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"}, - {"^BM_BigArgs/2147483648 %console_report$"}}); - -// ========================================================================= // -// ----------------------- Testing Complexity Output ----------------------- // -// ========================================================================= // - -void BM_Complexity_O1(benchmark::State& state) { - for (auto _ : state) { - // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); - } - state.SetComplexityN(state.range(0)); -} -BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1); -SET_SUBSTITUTIONS({{"%bigOStr", "[ ]* %float \\([0-9]+\\)"}, - {"%RMS", "[ ]*[0-9]+ %"}}); -ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"}, - {"^BM_Complexity_O1_RMS %RMS %RMS[ ]*$"}}); - -// ========================================================================= // -// ----------------------- Testing Aggregate Output ------------------------ // -// ========================================================================= // - -// Test that non-aggregate data is printed by default -void BM_Repeat(benchmark::State& state) { - for (auto _ : state) { - } -} -// need two repetitions min to be able to output any aggregate output -BENCHMARK(BM_Repeat)->Repetitions(2); -ADD_CASES(TC_ConsoleOut, - {{"^BM_Repeat/repeats:2 %console_report$"}, - {"^BM_Repeat/repeats:2 %console_report$"}, - {"^BM_Repeat/repeats:2_mean %console_time_only_report [ ]*2$"}, - {"^BM_Repeat/repeats:2_median %console_time_only_report [ ]*2$"}, - {"^BM_Repeat/repeats:2_stddev %console_time_only_report [ ]*2$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:2\"", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 
2,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"name\": \"BM_Repeat/repeats:2\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 2,$", MR_Next}, - {"\"repetition_index\": 1,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"name\": \"BM_Repeat/repeats:2_mean\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 2,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"mean\",$", MR_Next}, - {"\"iterations\": 2,$", MR_Next}, - {"\"name\": \"BM_Repeat/repeats:2_median\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 2,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"median\",$", MR_Next}, - {"\"iterations\": 2,$", MR_Next}, - {"\"name\": \"BM_Repeat/repeats:2_stddev\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 2,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"stddev\",$", MR_Next}, - {"\"iterations\": 2,$", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"}, - {"^\"BM_Repeat/repeats:2\",%csv_report$"}, - {"^\"BM_Repeat/repeats:2_mean\",%csv_report$"}, - {"^\"BM_Repeat/repeats:2_median\",%csv_report$"}, - {"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}}); -// but for two repetitions, mean and median is the same, so let's repeat.. -BENCHMARK(BM_Repeat)->Repetitions(3); -ADD_CASES(TC_ConsoleOut, - {{"^BM_Repeat/repeats:3 %console_report$"}, - {"^BM_Repeat/repeats:3 %console_report$"}, - {"^BM_Repeat/repeats:3 %console_report$"}, - {"^BM_Repeat/repeats:3_mean %console_time_only_report [ ]*3$"}, - {"^BM_Repeat/repeats:3_median %console_time_only_report [ ]*3$"}, - {"^BM_Repeat/repeats:3_stddev %console_time_only_report [ ]*3$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 3,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"name\": \"BM_Repeat/repeats:3\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 3,$", MR_Next}, - {"\"repetition_index\": 1,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"name\": \"BM_Repeat/repeats:3\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 3,$", MR_Next}, - {"\"repetition_index\": 2,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"name\": \"BM_Repeat/repeats:3_mean\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 3,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"mean\",$", MR_Next}, - {"\"iterations\": 3,$", MR_Next}, - {"\"name\": \"BM_Repeat/repeats:3_median\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 3,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"median\",$", MR_Next}, - {"\"iterations\": 3,$", MR_Next}, - {"\"name\": \"BM_Repeat/repeats:3_stddev\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, - {"\"run_type\": 
\"aggregate\",$", MR_Next}, - {"\"repetitions\": 3,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"stddev\",$", MR_Next}, - {"\"iterations\": 3,$", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"}, - {"^\"BM_Repeat/repeats:3\",%csv_report$"}, - {"^\"BM_Repeat/repeats:3\",%csv_report$"}, - {"^\"BM_Repeat/repeats:3_mean\",%csv_report$"}, - {"^\"BM_Repeat/repeats:3_median\",%csv_report$"}, - {"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}}); -// median differs between even/odd number of repetitions, so just to be sure -BENCHMARK(BM_Repeat)->Repetitions(4); -ADD_CASES(TC_ConsoleOut, - {{"^BM_Repeat/repeats:4 %console_report$"}, - {"^BM_Repeat/repeats:4 %console_report$"}, - {"^BM_Repeat/repeats:4 %console_report$"}, - {"^BM_Repeat/repeats:4 %console_report$"}, - {"^BM_Repeat/repeats:4_mean %console_time_only_report [ ]*4$"}, - {"^BM_Repeat/repeats:4_median %console_time_only_report [ ]*4$"}, - {"^BM_Repeat/repeats:4_stddev %console_time_only_report [ ]*4$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 4,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"name\": \"BM_Repeat/repeats:4\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 4,$", MR_Next}, - {"\"repetition_index\": 1,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"name\": \"BM_Repeat/repeats:4\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 4,$", MR_Next}, - {"\"repetition_index\": 2,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"name\": \"BM_Repeat/repeats:4\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 4,$", MR_Next}, - {"\"repetition_index\": 3,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"name\": \"BM_Repeat/repeats:4_mean\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 4,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"mean\",$", MR_Next}, - {"\"iterations\": 4,$", MR_Next}, - {"\"name\": \"BM_Repeat/repeats:4_median\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 4,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"median\",$", MR_Next}, - {"\"iterations\": 4,$", MR_Next}, - {"\"name\": \"BM_Repeat/repeats:4_stddev\",$"}, - {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 4,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"stddev\",$", MR_Next}, - {"\"iterations\": 4,$", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"}, - {"^\"BM_Repeat/repeats:4\",%csv_report$"}, - {"^\"BM_Repeat/repeats:4\",%csv_report$"}, - {"^\"BM_Repeat/repeats:4\",%csv_report$"}, - {"^\"BM_Repeat/repeats:4_mean\",%csv_report$"}, - {"^\"BM_Repeat/repeats:4_median\",%csv_report$"}, - {"^\"BM_Repeat/repeats:4_stddev\",%csv_report$"}}); - -// Test that a non-repeated test still prints non-aggregate results even when -// only-aggregate reports have been requested -void BM_RepeatOnce(benchmark::State& state) { - for (auto _ : state) { - } 
-}
-BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly();
-ADD_CASES(TC_ConsoleOut, {{"^BM_RepeatOnce/repeats:1 %console_report$"}});
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"},
-                       {"\"run_name\": \"BM_RepeatOnce/repeats:1\",$", MR_Next},
-                       {"\"run_type\": \"iteration\",$", MR_Next},
-                       {"\"repetitions\": 1,$", MR_Next},
-                       {"\"repetition_index\": 0,$", MR_Next},
-                       {"\"threads\": 1,$", MR_Next}});
-ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}});
-
-// Test that non-aggregate data is not reported
-void BM_SummaryRepeat(benchmark::State& state) {
-  for (auto _ : state) {
-  }
-}
-BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
-ADD_CASES(
-    TC_ConsoleOut,
-    {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
-     {"^BM_SummaryRepeat/repeats:3_mean %console_time_only_report [ ]*3$"},
-     {"^BM_SummaryRepeat/repeats:3_median %console_time_only_report [ ]*3$"},
-     {"^BM_SummaryRepeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
-ADD_CASES(TC_JSONOut,
-          {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
-           {"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
-           {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
-           {"\"run_type\": \"aggregate\",$", MR_Next},
-           {"\"repetitions\": 3,$", MR_Next},
-           {"\"threads\": 1,$", MR_Next},
-           {"\"aggregate_name\": \"mean\",$", MR_Next},
-           {"\"iterations\": 3,$", MR_Next},
-           {"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"},
-           {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
-           {"\"run_type\": \"aggregate\",$", MR_Next},
-           {"\"repetitions\": 3,$", MR_Next},
-           {"\"threads\": 1,$", MR_Next},
-           {"\"aggregate_name\": \"median\",$", MR_Next},
-           {"\"iterations\": 3,$", MR_Next},
-           {"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"},
-           {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
-           {"\"run_type\": \"aggregate\",$", MR_Next},
-           {"\"repetitions\": 3,$", MR_Next},
-           {"\"threads\": 1,$", MR_Next},
-           {"\"aggregate_name\": \"stddev\",$", MR_Next},
-           {"\"iterations\": 3,$", MR_Next}});
-ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
-                      {"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"},
-                      {"^\"BM_SummaryRepeat/repeats:3_median\",%csv_report$"},
-                      {"^\"BM_SummaryRepeat/repeats:3_stddev\",%csv_report$"}});
-
-// Test that non-aggregate data is not displayed.
-// NOTE: this test is kinda bad. we are only testing the display output.
-// But we don't check that the file output still contains everything...
-void BM_SummaryDisplay(benchmark::State& state) { - for (auto _ : state) { - } -} -BENCHMARK(BM_SummaryDisplay)->Repetitions(2)->DisplayAggregatesOnly(); -ADD_CASES( - TC_ConsoleOut, - {{".*BM_SummaryDisplay/repeats:2 ", MR_Not}, - {"^BM_SummaryDisplay/repeats:2_mean %console_time_only_report [ ]*2$"}, - {"^BM_SummaryDisplay/repeats:2_median %console_time_only_report [ ]*2$"}, - {"^BM_SummaryDisplay/repeats:2_stddev %console_time_only_report [ ]*2$"}}); -ADD_CASES(TC_JSONOut, - {{".*BM_SummaryDisplay/repeats:2 ", MR_Not}, - {"\"name\": \"BM_SummaryDisplay/repeats:2_mean\",$"}, - {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 2,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"mean\",$", MR_Next}, - {"\"iterations\": 2,$", MR_Next}, - {"\"name\": \"BM_SummaryDisplay/repeats:2_median\",$"}, - {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 2,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"median\",$", MR_Next}, - {"\"iterations\": 2,$", MR_Next}, - {"\"name\": \"BM_SummaryDisplay/repeats:2_stddev\",$"}, - {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 2,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"stddev\",$", MR_Next}, - {"\"iterations\": 2,$", MR_Next}}); -ADD_CASES(TC_CSVOut, - {{".*BM_SummaryDisplay/repeats:2 ", MR_Not}, - {"^\"BM_SummaryDisplay/repeats:2_mean\",%csv_report$"}, - {"^\"BM_SummaryDisplay/repeats:2_median\",%csv_report$"}, - {"^\"BM_SummaryDisplay/repeats:2_stddev\",%csv_report$"}}); - -// Test repeats with custom time unit. 
-void BM_RepeatTimeUnit(benchmark::State& state) { - for (auto _ : state) { - } -} -BENCHMARK(BM_RepeatTimeUnit) - ->Repetitions(3) - ->ReportAggregatesOnly() - ->Unit(benchmark::kMicrosecond); -ADD_CASES( - TC_ConsoleOut, - {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not}, - {"^BM_RepeatTimeUnit/repeats:3_mean %console_us_time_only_report [ ]*3$"}, - {"^BM_RepeatTimeUnit/repeats:3_median %console_us_time_only_report [ " - "]*3$"}, - {"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_time_only_report [ " - "]*3$"}}); -ADD_CASES(TC_JSONOut, - {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not}, - {"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"}, - {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 3,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"mean\",$", MR_Next}, - {"\"iterations\": 3,$", MR_Next}, - {"\"time_unit\": \"us\",?$"}, - {"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"}, - {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 3,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"median\",$", MR_Next}, - {"\"iterations\": 3,$", MR_Next}, - {"\"time_unit\": \"us\",?$"}, - {"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"}, - {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 3,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"stddev\",$", MR_Next}, - {"\"iterations\": 3,$", MR_Next}, - {"\"time_unit\": \"us\",?$"}}); -ADD_CASES(TC_CSVOut, - {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not}, - {"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"}, - {"^\"BM_RepeatTimeUnit/repeats:3_median\",%csv_us_report$"}, - {"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}}); - -// ========================================================================= // -// -------------------- Testing user-provided statistics ------------------- // -// ========================================================================= // - -const auto UserStatistics = [](const std::vector<double>& v) { - return v.back(); -}; -void BM_UserStats(benchmark::State& state) { - for (auto _ : state) { - state.SetIterationTime(150 / 10e8); - } -} -// clang-format off -BENCHMARK(BM_UserStats) - ->Repetitions(3) - ->Iterations(5) - ->UseManualTime() - ->ComputeStatistics("", UserStatistics); -// clang-format on - -// check that user-provided stats is calculated, and is after the default-ones -// empty string as name is intentional, it would sort before anything else -ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/iterations:5/repeats:3/manual_time [ " - "]* 150 ns %time [ ]*5$"}, - {"^BM_UserStats/iterations:5/repeats:3/manual_time [ " - "]* 150 ns %time [ ]*5$"}, - {"^BM_UserStats/iterations:5/repeats:3/manual_time [ " - "]* 150 ns %time [ ]*5$"}, - {"^BM_UserStats/iterations:5/repeats:3/" - "manual_time_mean [ ]* 150 ns %time [ ]*3$"}, - {"^BM_UserStats/iterations:5/repeats:3/" - "manual_time_median [ ]* 150 ns %time [ ]*3$"}, - {"^BM_UserStats/iterations:5/repeats:3/" - "manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"}, - {"^BM_UserStats/iterations:5/repeats:3/manual_time_ " - "[ ]* 150 ns %time [ ]*3$"}}); -ADD_CASES( - TC_JSONOut, - {{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"}, - {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", - MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": 5,$", MR_Next}, - {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, - {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"}, - {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", - MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 3,$", MR_Next}, - {"\"repetition_index\": 1,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": 5,$", MR_Next}, - {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, - {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"}, - {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", - MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 3,$", MR_Next}, - {"\"repetition_index\": 2,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": 5,$", MR_Next}, - {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, - {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"}, - {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", - MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 3,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"mean\",$", MR_Next}, - {"\"iterations\": 3,$", MR_Next}, - {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, - {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"}, - {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", - MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 3,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"median\",$", MR_Next}, - {"\"iterations\": 3,$", MR_Next}, - {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, - {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"}, - {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", - MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 3,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"stddev\",$", MR_Next}, - {"\"iterations\": 3,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"}, - {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", - MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 3,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"\",$", MR_Next}, - {"\"iterations\": 3,$", MR_Next}, - {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}}); -ADD_CASES( - TC_CSVOut, - {{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"}, - {"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"}, - {"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"}, - {"^\"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",%csv_report$"}, - {"^\"BM_UserStats/iterations:5/repeats:3/" - "manual_time_median\",%csv_report$"}, - {"^\"BM_UserStats/iterations:5/repeats:3/" - "manual_time_stddev\",%csv_report$"}, - {"^\"BM_UserStats/iterations:5/repeats:3/manual_time_\",%csv_report$"}}); - -// ========================================================================= // -// ------------------------- Testing StrEscape JSON ------------------------ // -// ========================================================================= // -#if 0 // enable when csv testing code correctly handles multi-line fields -void 
BM_JSON_Format(benchmark::State& state) { - state.SkipWithError("val\b\f\n\r\t\\\"with\"es,capes"); - for (auto _ : state) { - } -} -BENCHMARK(BM_JSON_Format); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_JSON_Format\",$"}, - {"\"run_name\": \"BM_JSON_Format\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"error_occurred\": true,$", MR_Next}, - {R"("error_message": "val\\b\\f\\n\\r\\t\\\\\\"with\\"es,capes",$)", MR_Next}}); -#endif -// ========================================================================= // -// -------------------------- Testing CsvEscape ---------------------------- // -// ========================================================================= // - -void BM_CSV_Format(benchmark::State& state) { - state.SkipWithError("\"freedom\""); - for (auto _ : state) { - } -} -BENCHMARK(BM_CSV_Format); -ADD_CASES(TC_CSVOut, {{"^\"BM_CSV_Format\",,,,,,,,true,\"\"\"freedom\"\"\"$"}}); - -// ========================================================================= // -// --------------------------- TEST CASES END ------------------------------ // -// ========================================================================= // - -int main(int argc, char* argv[]) { RunOutputTests(argc, argv); } diff --git a/benchmarks/thirdparty/benchmark/test/skip_with_error_test.cc b/benchmarks/thirdparty/benchmark/test/skip_with_error_test.cc deleted file mode 100755 index 97a2e3c03b..0000000000 --- a/benchmarks/thirdparty/benchmark/test/skip_with_error_test.cc +++ /dev/null @@ -1,195 +0,0 @@ - -#undef NDEBUG -#include <cassert> -#include <vector> - -#include "../src/check.h" // NOTE: check.h is for internal use only! -#include "benchmark/benchmark.h" - -namespace { - -class TestReporter : public benchmark::ConsoleReporter { - public: - virtual bool ReportContext(const Context& context) { - return ConsoleReporter::ReportContext(context); - }; - - virtual void ReportRuns(const std::vector<Run>& report) { - all_runs_.insert(all_runs_.end(), begin(report), end(report)); - ConsoleReporter::ReportRuns(report); - } - - TestReporter() {} - virtual ~TestReporter() {} - - mutable std::vector<Run> all_runs_; -}; - -struct TestCase { - std::string name; - bool error_occurred; - std::string error_message; - - typedef benchmark::BenchmarkReporter::Run Run; - - void CheckRun(Run const& run) const { - CHECK(name == run.benchmark_name()) - << "expected " << name << " got " << run.benchmark_name(); - CHECK(error_occurred == run.error_occurred); - CHECK(error_message == run.error_message); - if (error_occurred) { - // CHECK(run.iterations == 0); - } else { - CHECK(run.iterations != 0); - } - } -}; - -std::vector<TestCase> ExpectedResults; - -int AddCases(const char* base_name, std::initializer_list<TestCase> const& v) { - for (auto TC : v) { - TC.name = base_name + TC.name; - ExpectedResults.push_back(std::move(TC)); - } - return 0; -} - -#define CONCAT(x, y) CONCAT2(x, y) -#define CONCAT2(x, y) x##y -#define ADD_CASES(...)
int CONCAT(dummy, __LINE__) = AddCases(__VA_ARGS__) - -} // end namespace - -void BM_error_no_running(benchmark::State& state) { - state.SkipWithError("error message"); -} -BENCHMARK(BM_error_no_running); -ADD_CASES("BM_error_no_running", {{"", true, "error message"}}); - -void BM_error_before_running(benchmark::State& state) { - state.SkipWithError("error message"); - while (state.KeepRunning()) { - assert(false); - } -} -BENCHMARK(BM_error_before_running); -ADD_CASES("BM_error_before_running", {{"", true, "error message"}}); - -void BM_error_before_running_batch(benchmark::State& state) { - state.SkipWithError("error message"); - while (state.KeepRunningBatch(17)) { - assert(false); - } -} -BENCHMARK(BM_error_before_running_batch); -ADD_CASES("BM_error_before_running_batch", {{"", true, "error message"}}); - -void BM_error_before_running_range_for(benchmark::State& state) { - state.SkipWithError("error message"); - for (auto _ : state) { - assert(false); - } -} -BENCHMARK(BM_error_before_running_range_for); -ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}}); - -void BM_error_during_running(benchmark::State& state) { - bool first_iter = true; - while (state.KeepRunning()) { - if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) { - assert(first_iter); - first_iter = false; - state.SkipWithError("error message"); - } else { - state.PauseTiming(); - state.ResumeTiming(); - } - } -} -BENCHMARK(BM_error_during_running)->Arg(1)->Arg(2)->ThreadRange(1, 8); -ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"}, - {"/1/threads:2", true, "error message"}, - {"/1/threads:4", true, "error message"}, - {"/1/threads:8", true, "error message"}, - {"/2/threads:1", false, ""}, - {"/2/threads:2", false, ""}, - {"/2/threads:4", false, ""}, - {"/2/threads:8", false, ""}}); - -void BM_error_during_running_ranged_for(benchmark::State& state) { - assert(state.max_iterations > 3 && "test requires at least a few iterations"); - bool first_iter = true; - // NOTE: Users should not write the for loop explicitly. - for (auto It = state.begin(), End = state.end(); It != End; ++It) { - if (state.range(0) == 1) { - assert(first_iter); - first_iter = false; - state.SkipWithError("error message"); - // Test the unfortunate but documented behavior that the ranged-for loop - // doesn't automatically terminate when SkipWithError is set.
- assert(++It != End); - break; // Required behavior - } - } -} -BENCHMARK(BM_error_during_running_ranged_for)->Arg(1)->Arg(2)->Iterations(5); -ADD_CASES("BM_error_during_running_ranged_for", - {{"/1/iterations:5", true, "error message"}, - {"/2/iterations:5", false, ""}}); - -void BM_error_after_running(benchmark::State& state) { - for (auto _ : state) { - benchmark::DoNotOptimize(state.iterations()); - } - if (state.thread_index <= (state.threads / 2)) - state.SkipWithError("error message"); -} -BENCHMARK(BM_error_after_running)->ThreadRange(1, 8); -ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"}, - {"/threads:2", true, "error message"}, - {"/threads:4", true, "error message"}, - {"/threads:8", true, "error message"}}); - -void BM_error_while_paused(benchmark::State& state) { - bool first_iter = true; - while (state.KeepRunning()) { - if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) { - assert(first_iter); - first_iter = false; - state.PauseTiming(); - state.SkipWithError("error message"); - } else { - state.PauseTiming(); - state.ResumeTiming(); - } - } -} -BENCHMARK(BM_error_while_paused)->Arg(1)->Arg(2)->ThreadRange(1, 8); -ADD_CASES("BM_error_while_paused", {{"/1/threads:1", true, "error message"}, - {"/1/threads:2", true, "error message"}, - {"/1/threads:4", true, "error message"}, - {"/1/threads:8", true, "error message"}, - {"/2/threads:1", false, ""}, - {"/2/threads:2", false, ""}, - {"/2/threads:4", false, ""}, - {"/2/threads:8", false, ""}}); - -int main(int argc, char* argv[]) { - benchmark::Initialize(&argc, argv); - - TestReporter test_reporter; - benchmark::RunSpecifiedBenchmarks(&test_reporter); - - typedef benchmark::BenchmarkReporter::Run Run; - auto EB = ExpectedResults.begin(); - - for (Run const& run : test_reporter.all_runs_) { - assert(EB != ExpectedResults.end()); - EB->CheckRun(run); - ++EB; - } - assert(EB == ExpectedResults.end()); - - return 0; -} diff --git a/benchmarks/thirdparty/benchmark/test/state_assembly_test.cc b/benchmarks/thirdparty/benchmark/test/state_assembly_test.cc deleted file mode 100755 index 7ddbb3b2a9..0000000000 --- a/benchmarks/thirdparty/benchmark/test/state_assembly_test.cc +++ /dev/null @@ -1,68 +0,0 @@ -#include <benchmark/benchmark.h> - -#ifdef __clang__ -#pragma clang diagnostic ignored "-Wreturn-type" -#endif - -// clang-format off -extern "C" { - extern int ExternInt; - benchmark::State& GetState(); - void Fn(); -} -// clang-format on - -using benchmark::State; - -// CHECK-LABEL: test_for_auto_loop: -extern "C" int test_for_auto_loop() { - State& S = GetState(); - int x = 42; - // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv - // CHECK-NEXT: testq %rbx, %rbx - // CHECK-NEXT: je [[LOOP_END:.*]] - - for (auto _ : S) { - // CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]: - // CHECK-GNU-NEXT: subq $1, %rbx - // CHECK-CLANG-NEXT: {{(addq \$1, %rax|incq %rax|addq \$-1, %rbx)}} - // CHECK-NEXT: jne .L[[LOOP_HEAD]] - benchmark::DoNotOptimize(x); - } - // CHECK: [[LOOP_END]]: - // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv - - // CHECK: movl $101, %eax - // CHECK: ret - return 101; -} - -// CHECK-LABEL: test_while_loop: -extern "C" int test_while_loop() { - State& S = GetState(); - int x = 42; - - // CHECK: j{{(e|mp)}} .L[[LOOP_HEADER:[a-zA-Z0-9_]+]] - // CHECK-NEXT: .L[[LOOP_BODY:[a-zA-Z0-9_]+]]: - while (S.KeepRunning()) { - // CHECK-GNU-NEXT: subq $1, %[[IREG:[a-z]+]] - // CHECK-CLANG-NEXT: {{(addq \$-1,|decq)}} %[[IREG:[a-z]+]] - // CHECK: movq %[[IREG]], [[DEST:.*]] -
benchmark::DoNotOptimize(x); - } - // CHECK-DAG: movq [[DEST]], %[[IREG]] - // CHECK-DAG: testq %[[IREG]], %[[IREG]] - // CHECK-DAG: jne .L[[LOOP_BODY]] - // CHECK-DAG: .L[[LOOP_HEADER]]: - - // CHECK: cmpb $0 - // CHECK-NEXT: jne .L[[LOOP_END:[a-zA-Z0-9_]+]] - // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv - - // CHECK: .L[[LOOP_END]]: - // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv - - // CHECK: movl $101, %eax - // CHECK: ret - return 101; -} diff --git a/benchmarks/thirdparty/benchmark/test/statistics_gtest.cc b/benchmarks/thirdparty/benchmark/test/statistics_gtest.cc deleted file mode 100755 index 3ddc72dd7a..0000000000 --- a/benchmarks/thirdparty/benchmark/test/statistics_gtest.cc +++ /dev/null @@ -1,28 +0,0 @@ -//===---------------------------------------------------------------------===// -// statistics_test - Unit tests for src/statistics.cc -//===---------------------------------------------------------------------===// - -#include "../src/statistics.h" -#include "gtest/gtest.h" - -namespace { -TEST(StatisticsTest, Mean) { - EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({42, 42, 42, 42}), 42.0); - EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 3, 4}), 2.5); - EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 5, 10, 10, 14}), 7.0); -} - -TEST(StatisticsTest, Median) { - EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({42, 42, 42, 42}), 42.0); - EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 3, 4}), 2.5); - EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 5, 10, 10}), 5.0); -} - -TEST(StatisticsTest, StdDev) { - EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({101, 101, 101, 101}), 0.0); - EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({1, 2, 3}), 1.0); - EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({2.5, 2.4, 3.3, 4.2, 5.1}), - 1.151086443322134); -} - -} // end namespace diff --git a/benchmarks/thirdparty/benchmark/test/string_util_gtest.cc b/benchmarks/thirdparty/benchmark/test/string_util_gtest.cc deleted file mode 100755 index 01bf155d8c..0000000000 --- a/benchmarks/thirdparty/benchmark/test/string_util_gtest.cc +++ /dev/null @@ -1,153 +0,0 @@ -//===---------------------------------------------------------------------===// -// statistics_test - Unit tests for src/statistics.cc -//===---------------------------------------------------------------------===// - -#include "../src/string_util.h" -#include "../src/internal_macros.h" -#include "gtest/gtest.h" - -namespace { -TEST(StringUtilTest, stoul) { - { - size_t pos = 0; - EXPECT_EQ(0ul, benchmark::stoul("0", &pos)); - EXPECT_EQ(1ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(7ul, benchmark::stoul("7", &pos)); - EXPECT_EQ(1ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(135ul, benchmark::stoul("135", &pos)); - EXPECT_EQ(3ul, pos); - } -#if ULONG_MAX == 0xFFFFFFFFul - { - size_t pos = 0; - EXPECT_EQ(0xFFFFFFFFul, benchmark::stoul("4294967295", &pos)); - EXPECT_EQ(10ul, pos); - } -#elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFul - { - size_t pos = 0; - EXPECT_EQ(0xFFFFFFFFFFFFFFFFul, benchmark::stoul("18446744073709551615", &pos)); - EXPECT_EQ(20ul, pos); - } -#endif - { - size_t pos = 0; - EXPECT_EQ(10ul, benchmark::stoul("1010", &pos, 2)); - EXPECT_EQ(4ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(520ul, benchmark::stoul("1010", &pos, 8)); - EXPECT_EQ(4ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(1010ul, benchmark::stoul("1010", &pos, 10)); - EXPECT_EQ(4ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(4112ul, benchmark::stoul("1010", &pos, 16)); - EXPECT_EQ(4ul, pos); - } - { - size_t pos = 0; - 
EXPECT_EQ(0xBEEFul, benchmark::stoul("BEEF", &pos, 16)); - EXPECT_EQ(4ul, pos); - } -#ifndef BENCHMARK_HAS_NO_EXCEPTIONS - { - ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument); - } -#endif -} - -TEST(StringUtilTest, stoi) { - { - size_t pos = 0; - EXPECT_EQ(0, benchmark::stoi("0", &pos)); - EXPECT_EQ(1ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(-17, benchmark::stoi("-17", &pos)); - EXPECT_EQ(3ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(1357, benchmark::stoi("1357", &pos)); - EXPECT_EQ(4ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2)); - EXPECT_EQ(4ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8)); - EXPECT_EQ(4ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10)); - EXPECT_EQ(4ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16)); - EXPECT_EQ(4ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16)); - EXPECT_EQ(4ul, pos); - } -#ifndef BENCHMARK_HAS_NO_EXCEPTIONS - { - ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument); - } -#endif -} - -TEST(StringUtilTest, stod) { - { - size_t pos = 0; - EXPECT_EQ(0.0, benchmark::stod("0", &pos)); - EXPECT_EQ(1ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(-84.0, benchmark::stod("-84", &pos)); - EXPECT_EQ(3ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(1234.0, benchmark::stod("1234", &pos)); - EXPECT_EQ(4ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(1.5, benchmark::stod("1.5", &pos)); - EXPECT_EQ(3ul, pos); - } - { - size_t pos = 0; - /* Note: exactly representable as double */ - EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos)); - EXPECT_EQ(8ul, pos); - } -#ifndef BENCHMARK_HAS_NO_EXCEPTIONS - { - ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument); - } -#endif -} - -} // end namespace diff --git a/benchmarks/thirdparty/benchmark/test/templated_fixture_test.cc b/benchmarks/thirdparty/benchmark/test/templated_fixture_test.cc deleted file mode 100755 index fe9865cc77..0000000000 --- a/benchmarks/thirdparty/benchmark/test/templated_fixture_test.cc +++ /dev/null @@ -1,28 +0,0 @@ - -#include "benchmark/benchmark.h" - -#include <cassert> -#include <memory> - -template <typename T> -class MyFixture : public ::benchmark::Fixture { - public: - MyFixture() : data(0) {} - - T data; -}; - -BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State& st) { - for (auto _ : st) { - data += 1; - } -} - -BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State& st) { - for (auto _ : st) { - data += 1.0; - } -} -BENCHMARK_REGISTER_F(MyFixture, Bar); - -BENCHMARK_MAIN(); diff --git a/benchmarks/thirdparty/benchmark/test/user_counters_tabular_test.cc b/benchmarks/thirdparty/benchmark/test/user_counters_tabular_test.cc deleted file mode 100755 index 18373c0aac..0000000000 --- a/benchmarks/thirdparty/benchmark/test/user_counters_tabular_test.cc +++ /dev/null @@ -1,285 +0,0 @@ - -#undef NDEBUG - -#include "benchmark/benchmark.h" -#include "output_test.h" - -// @todo: this checks the full output at once; the rule for -// CounterSet1 was failing because it was not matching "^[-]+$". -// @todo: check that the counters are vertically aligned.
-ADD_CASES( - TC_ConsoleOut, - { - // keeping these lines long improves readability, so: - // clang-format off - {"^[-]+$", MR_Next}, - {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Bat %s Baz %s Foo %s Frob %s Lob$", MR_Next}, - {"^[-]+$", MR_Next}, - {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, - {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, - {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, - {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, - {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, - {"^[-]+$", MR_Next}, - {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Baz %s Foo$", MR_Next}, - {"^[-]+$", MR_Next}, - {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^[-]+$", MR_Next}, - {"^Benchmark %s Time %s CPU %s Iterations %s Bat %s Baz %s Foo$", MR_Next}, - {"^[-]+$", MR_Next}, - {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", 
MR_Next}, - {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$"}, - // clang-format on - }); -ADD_CASES(TC_CSVOut, {{"%csv_header," - "\"Bar\",\"Bat\",\"Baz\",\"Foo\",\"Frob\",\"Lob\""}}); - -// ========================================================================= // -// ------------------------- Tabular Counters Output ----------------------- // -// ========================================================================= // - -void BM_Counters_Tabular(benchmark::State& state) { - for (auto _ : state) { - } - namespace bm = benchmark; - state.counters.insert({ - {"Foo", {1, bm::Counter::kAvgThreads}}, - {"Bar", {2, bm::Counter::kAvgThreads}}, - {"Baz", {4, bm::Counter::kAvgThreads}}, - {"Bat", {8, bm::Counter::kAvgThreads}}, - {"Frob", {16, bm::Counter::kAvgThreads}}, - {"Lob", {32, bm::Counter::kAvgThreads}}, - }); -} -BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"}, - {"\"run_name\": \"BM_Counters_Tabular/threads:%int\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"Bar\": %float,$", MR_Next}, - {"\"Bat\": %float,$", MR_Next}, - {"\"Baz\": %float,$", MR_Next}, - {"\"Foo\": %float,$", MR_Next}, - {"\"Frob\": %float,$", MR_Next}, - {"\"Lob\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/threads:%int\",%csv_report," - "%float,%float,%float,%float,%float,%float$"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckTabular(Results const& e) { - CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 1); - CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 2); - CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 4); - CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 8); - CHECK_COUNTER_VALUE(e, int, "Frob", EQ, 16); - CHECK_COUNTER_VALUE(e, int, "Lob", EQ, 32); -} -CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular); - -// ========================================================================= // -// -------------------- Tabular+Rate Counters Output ----------------------- // -// ========================================================================= // - -void BM_CounterRates_Tabular(benchmark::State& state) { - for (auto _ : state) { - // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); - } - namespace bm = benchmark; - state.counters.insert({ - {"Foo", {1, bm::Counter::kAvgThreadsRate}}, - {"Bar", {2, bm::Counter::kAvgThreadsRate}}, - {"Baz", {4, bm::Counter::kAvgThreadsRate}}, - {"Bat", {8, bm::Counter::kAvgThreadsRate}}, - {"Frob", {16, bm::Counter::kAvgThreadsRate}}, - {"Lob", {32, bm::Counter::kAvgThreadsRate}}, - }); -} -BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"}, - {"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$", - MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": 
\"ns\",$", MR_Next}, - {"\"Bar\": %float,$", MR_Next}, - {"\"Bat\": %float,$", MR_Next}, - {"\"Baz\": %float,$", MR_Next}, - {"\"Foo\": %float,$", MR_Next}, - {"\"Frob\": %float,$", MR_Next}, - {"\"Lob\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report," - "%float,%float,%float,%float,%float,%float$"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckTabularRate(Results const& e) { - double t = e.DurationCPUTime(); - CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1. / t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2. / t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4. / t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8. / t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16. / t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32. / t, 0.001); -} -CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int", - &CheckTabularRate); - -// ========================================================================= // -// ------------------------- Tabular Counters Output ----------------------- // -// ========================================================================= // - -// set only some of the counters -void BM_CounterSet0_Tabular(benchmark::State& state) { - for (auto _ : state) { - } - namespace bm = benchmark; - state.counters.insert({ - {"Foo", {10, bm::Counter::kAvgThreads}}, - {"Bar", {20, bm::Counter::kAvgThreads}}, - {"Baz", {40, bm::Counter::kAvgThreads}}, - }); -} -BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"}, - {"\"run_name\": \"BM_CounterSet0_Tabular/threads:%int\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"Bar\": %float,$", MR_Next}, - {"\"Baz\": %float,$", MR_Next}, - {"\"Foo\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet0_Tabular/threads:%int\",%csv_report," - "%float,,%float,%float,,"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckSet0(Results const& e) { - CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10); - CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 20); - CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40); -} -CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0); - -// again. 
-void BM_CounterSet1_Tabular(benchmark::State& state) { - for (auto _ : state) { - } - namespace bm = benchmark; - state.counters.insert({ - {"Foo", {15, bm::Counter::kAvgThreads}}, - {"Bar", {25, bm::Counter::kAvgThreads}}, - {"Baz", {45, bm::Counter::kAvgThreads}}, - }); -} -BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"}, - {"\"run_name\": \"BM_CounterSet1_Tabular/threads:%int\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"Bar\": %float,$", MR_Next}, - {"\"Baz\": %float,$", MR_Next}, - {"\"Foo\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet1_Tabular/threads:%int\",%csv_report," - "%float,,%float,%float,,"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckSet1(Results const& e) { - CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 15); - CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 25); - CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 45); -} -CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1); - -// ========================================================================= // -// ------------------------- Tabular Counters Output ----------------------- // -// ========================================================================= // - -// set only some of the counters, different set now. -void BM_CounterSet2_Tabular(benchmark::State& state) { - for (auto _ : state) { - } - namespace bm = benchmark; - state.counters.insert({ - {"Foo", {10, bm::Counter::kAvgThreads}}, - {"Bat", {30, bm::Counter::kAvgThreads}}, - {"Baz", {40, bm::Counter::kAvgThreads}}, - }); -} -BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"}, - {"\"run_name\": \"BM_CounterSet2_Tabular/threads:%int\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"Bat\": %float,$", MR_Next}, - {"\"Baz\": %float,$", MR_Next}, - {"\"Foo\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet2_Tabular/threads:%int\",%csv_report," - ",%float,%float,%float,,"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckSet2(Results const& e) { - CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10); - CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 30); - CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40); -} -CHECK_BENCHMARK_RESULTS("BM_CounterSet2_Tabular", &CheckSet2); - -// ========================================================================= // -// --------------------------- TEST CASES END ------------------------------ // -// ========================================================================= // - -int main(int argc, char* argv[]) { RunOutputTests(argc, argv); } diff --git a/benchmarks/thirdparty/benchmark/test/user_counters_test.cc b/benchmarks/thirdparty/benchmark/test/user_counters_test.cc deleted file mode 100755 
index 5699f4f5e1..0000000000 --- a/benchmarks/thirdparty/benchmark/test/user_counters_test.cc +++ /dev/null @@ -1,531 +0,0 @@ - -#undef NDEBUG - -#include "benchmark/benchmark.h" -#include "output_test.h" - -// ========================================================================= // -// ---------------------- Testing Prologue Output -------------------------- // -// ========================================================================= // - -// clang-format off - -ADD_CASES(TC_ConsoleOut, - {{"^[-]+$", MR_Next}, - {"^Benchmark %s Time %s CPU %s Iterations UserCounters...$", MR_Next}, - {"^[-]+$", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}}); - -// clang-format on - -// ========================================================================= // -// ------------------------- Simple Counters Output ------------------------ // -// ========================================================================= // - -void BM_Counters_Simple(benchmark::State& state) { - for (auto _ : state) { - } - state.counters["foo"] = 1; - state.counters["bar"] = 2 * (double)state.iterations(); -} -BENCHMARK(BM_Counters_Simple); -ADD_CASES(TC_ConsoleOut, - {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"}, - {"\"run_name\": \"BM_Counters_Simple\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bar\": %float,$", MR_Next}, - {"\"foo\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Simple\",%csv_report,%float,%float$"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckSimple(Results const& e) { - double its = e.NumIterations(); - CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1); - // check that the value of bar is within 0.1% of the expected value - CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. 
* its, 0.001); -} -CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple); - -// ========================================================================= // -// --------------------- Counters+Items+Bytes/s Output --------------------- // -// ========================================================================= // - -namespace { -int num_calls1 = 0; -} -void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) { - for (auto _ : state) { - // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); - } - state.counters["foo"] = 1; - state.counters["bar"] = ++num_calls1; - state.SetBytesProcessed(364); - state.SetItemsProcessed(150); -} -BENCHMARK(BM_Counters_WithBytesAndItemsPSec); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_WithBytesAndItemsPSec %console_report " - "bar=%hrfloat bytes_per_second=%hrfloat/s " - "foo=%hrfloat items_per_second=%hrfloat/s$"}}); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"}, - {"\"run_name\": \"BM_Counters_WithBytesAndItemsPSec\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bar\": %float,$", MR_Next}, - {"\"bytes_per_second\": %float,$", MR_Next}, - {"\"foo\": %float,$", MR_Next}, - {"\"items_per_second\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_WithBytesAndItemsPSec\"," - "%csv_bytes_items_report,%float,%float$"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckBytesAndItemsPSec(Results const& e) { - double t = e.DurationCPUTime(); // this (and not real time) is the time used - CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1); - CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1); - // check that the values are within 0.1% of the expected values - CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364. / t, 0.001); - CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150. 
/ t, 0.001); -} -CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec", - &CheckBytesAndItemsPSec); - -// ========================================================================= // -// ------------------------- Rate Counters Output -------------------------- // -// ========================================================================= // - -void BM_Counters_Rate(benchmark::State& state) { - for (auto _ : state) { - // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); - } - namespace bm = benchmark; - state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate}; - state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate}; -} -BENCHMARK(BM_Counters_Rate); -ADD_CASES( - TC_ConsoleOut, - {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"}, - {"\"run_name\": \"BM_Counters_Rate\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bar\": %float,$", MR_Next}, - {"\"foo\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Rate\",%csv_report,%float,%float$"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckRate(Results const& e) { - double t = e.DurationCPUTime(); // this (and not real time) is the time used - // check that the values are within 0.1% of the expected values - CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. 
/ t, 0.001); -} -CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate); - -// ========================================================================= // -// ----------------------- Inverted Counters Output ------------------------ // -// ========================================================================= // - -void BM_Invert(benchmark::State& state) { - for (auto _ : state) { - // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); - } - namespace bm = benchmark; - state.counters["foo"] = bm::Counter{0.0001, bm::Counter::kInvert}; - state.counters["bar"] = bm::Counter{10000, bm::Counter::kInvert}; -} -BENCHMARK(BM_Invert); -ADD_CASES(TC_ConsoleOut, - {{"^BM_Invert %console_report bar=%hrfloatu foo=%hrfloatk$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Invert\",$"}, - {"\"run_name\": \"BM_Invert\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bar\": %float,$", MR_Next}, - {"\"foo\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Invert\",%csv_report,%float,%float$"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckInvert(Results const& e) { - CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 10000, 0.0001); - CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 0.0001, 0.0001); -} -CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert); - -// ========================================================================= // -// ------------------------- InvertedRate Counters Output -// -------------------------- // -// ========================================================================= // - -void BM_Counters_InvertedRate(benchmark::State& state) { - for (auto _ : state) { - // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); - } - namespace bm = benchmark; - state.counters["foo"] = - bm::Counter{1, bm::Counter::kIsRate | bm::Counter::kInvert}; - state.counters["bar"] = - bm::Counter{8192, bm::Counter::kIsRate | bm::Counter::kInvert}; -} -BENCHMARK(BM_Counters_InvertedRate); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_InvertedRate %console_report " - "bar=%hrfloats foo=%hrfloats$"}}); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_Counters_InvertedRate\",$"}, - {"\"run_name\": \"BM_Counters_InvertedRate\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bar\": %float,$", MR_Next}, - {"\"foo\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, - {{"^\"BM_Counters_InvertedRate\",%csv_report,%float,%float$"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckInvertedRate(Results const& e) { - double t = e.DurationCPUTime(); // this (and not real time) is the time used - // check that the values are within 0.1% of the expected values - CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, t / 8192.0, 0.001); -} 
-CHECK_BENCHMARK_RESULTS("BM_Counters_InvertedRate", &CheckInvertedRate); - -// ========================================================================= // -// ------------------------- Thread Counters Output ------------------------ // -// ========================================================================= // - -void BM_Counters_Threads(benchmark::State& state) { - for (auto _ : state) { - } - state.counters["foo"] = 1; - state.counters["bar"] = 2; -} -BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report " - "bar=%hrfloat foo=%hrfloat$"}}); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"}, - {"\"run_name\": \"BM_Counters_Threads/threads:%int\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bar\": %float,$", MR_Next}, - {"\"foo\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES( - TC_CSVOut, - {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckThreads(Results const& e) { - CHECK_COUNTER_VALUE(e, int, "foo", EQ, e.NumThreads()); - CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2 * e.NumThreads()); -} -CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads); - -// ========================================================================= // -// ---------------------- ThreadAvg Counters Output ------------------------ // -// ========================================================================= // - -void BM_Counters_AvgThreads(benchmark::State& state) { - for (auto _ : state) { - } - namespace bm = benchmark; - state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads}; - state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreads}; -} -BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int " - "%console_report bar=%hrfloat foo=%hrfloat$"}}); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"}, - {"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bar\": %float,$", MR_Next}, - {"\"foo\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES( - TC_CSVOut, - {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckAvgThreads(Results const& e) { - CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1); - CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2); -} -CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int", - &CheckAvgThreads); - -// ========================================================================= // -// ---------------------- ThreadAvg Counters Output ------------------------ // -// ========================================================================= // - -void 
BM_Counters_AvgThreadsRate(benchmark::State& state) { - for (auto _ : state) { - // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); - } - namespace bm = benchmark; - state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate}; - state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreadsRate}; -} -BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int " - "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"}, - {"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$", - MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bar\": %float,$", MR_Next}, - {"\"foo\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/" - "threads:%int\",%csv_report,%float,%float$"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckAvgThreadsRate(Results const& e) { - CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / e.DurationCPUTime(), 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / e.DurationCPUTime(), 0.001); -} -CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int", - &CheckAvgThreadsRate); - -// ========================================================================= // -// ------------------- IterationInvariant Counters Output ------------------ // -// ========================================================================= // - -void BM_Counters_IterationInvariant(benchmark::State& state) { - for (auto _ : state) { - } - namespace bm = benchmark; - state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariant}; - state.counters["bar"] = bm::Counter{2, bm::Counter::kIsIterationInvariant}; -} -BENCHMARK(BM_Counters_IterationInvariant); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report " - "bar=%hrfloat foo=%hrfloat$"}}); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_Counters_IterationInvariant\",$"}, - {"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bar\": %float,$", MR_Next}, - {"\"foo\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, - {{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckIterationInvariant(Results const& e) { - double its = e.NumIterations(); - // check that the values are within 0.1% of the expected value - CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. 
* its, 0.001); -} -CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant", - &CheckIterationInvariant); - -// ========================================================================= // -// ----------------- IterationInvariantRate Counters Output ---------------- // -// ========================================================================= // - -void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) { - for (auto _ : state) { - // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); - } - namespace bm = benchmark; - state.counters["foo"] = - bm::Counter{1, bm::Counter::kIsIterationInvariantRate}; - state.counters["bar"] = - bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant}; -} -BENCHMARK(BM_Counters_kIsIterationInvariantRate); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kIsIterationInvariantRate " - "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"}, - {"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$", - MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bar\": %float,$", MR_Next}, - {"\"foo\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kIsIterationInvariantRate\",%csv_report," - "%float,%float$"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckIsIterationInvariantRate(Results const& e) { - double its = e.NumIterations(); - double t = e.DurationCPUTime(); // this (and not real time) is the time used - // check that the values are within 0.1% of the expected values - CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its * 1. / t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, its * 2. 
/ t, 0.001); -} -CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate", - &CheckIsIterationInvariantRate); - -// ========================================================================= // -// ------------------- AvgIterations Counters Output ------------------ // -// ========================================================================= // - -void BM_Counters_AvgIterations(benchmark::State& state) { - for (auto _ : state) { - } - namespace bm = benchmark; - state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterations}; - state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgIterations}; -} -BENCHMARK(BM_Counters_AvgIterations); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgIterations %console_report " - "bar=%hrfloat foo=%hrfloat$"}}); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_Counters_AvgIterations\",$"}, - {"\"run_name\": \"BM_Counters_AvgIterations\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bar\": %float,$", MR_Next}, - {"\"foo\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, - {{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckAvgIterations(Results const& e) { - double its = e.NumIterations(); - // check that the values are within 0.1% of the expected value - CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its, 0.001); -} -CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations); - -// ========================================================================= // -// ----------------- AvgIterationsRate Counters Output ---------------- // -// ========================================================================= // - -void BM_Counters_kAvgIterationsRate(benchmark::State& state) { - for (auto _ : state) { - // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); - } - namespace bm = benchmark; - state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate}; - state.counters["bar"] = - bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kAvgIterations}; -} -BENCHMARK(BM_Counters_kAvgIterationsRate); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kAvgIterationsRate " - "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"}, - {"\"run_name\": \"BM_Counters_kAvgIterationsRate\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bar\": %float,$", MR_Next}, - {"\"foo\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kAvgIterationsRate\",%csv_report," - "%float,%float$"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckAvgIterationsRate(Results const& e) { - double its = e.NumIterations(); - double t = e.DurationCPUTime(); // this (and not real 
time) is the time used - // check that the values are within 0.1% of the expected values - CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its / t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its / t, 0.001); -} -CHECK_BENCHMARK_RESULTS("BM_Counters_kAvgIterationsRate", - &CheckAvgIterationsRate); - -// ========================================================================= // -// --------------------------- TEST CASES END ------------------------------ // -// ========================================================================= // - -int main(int argc, char* argv[]) { RunOutputTests(argc, argv); } diff --git a/benchmarks/thirdparty/benchmark/test/user_counters_thousands_test.cc b/benchmarks/thirdparty/benchmark/test/user_counters_thousands_test.cc deleted file mode 100755 index 21d8285ded..0000000000 --- a/benchmarks/thirdparty/benchmark/test/user_counters_thousands_test.cc +++ /dev/null @@ -1,173 +0,0 @@ - -#undef NDEBUG - -#include "benchmark/benchmark.h" -#include "output_test.h" - -// ========================================================================= // -// ------------------------ Thousands Customisation ------------------------ // -// ========================================================================= // - -void BM_Counters_Thousands(benchmark::State& state) { - for (auto _ : state) { - } - namespace bm = benchmark; - state.counters.insert({ - {"t0_1000000DefaultBase", - bm::Counter(1000 * 1000, bm::Counter::kDefaults)}, - {"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults, - benchmark::Counter::OneK::kIs1000)}, - {"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults, - benchmark::Counter::OneK::kIs1024)}, - {"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults, - benchmark::Counter::OneK::kIs1000)}, - {"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults, - benchmark::Counter::OneK::kIs1024)}, - }); -} -BENCHMARK(BM_Counters_Thousands)->Repetitions(2); -ADD_CASES( - TC_ConsoleOut, - { - {"^BM_Counters_Thousands/repeats:2 %console_report " - "t0_1000000DefaultBase=1000k " - "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k " - "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"}, - {"^BM_Counters_Thousands/repeats:2 %console_report " - "t0_1000000DefaultBase=1000k " - "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k " - "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"}, - {"^BM_Counters_Thousands/repeats:2_mean %console_report " - "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k " - "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k " - "t4_1048576Base1024=1024k$"}, - {"^BM_Counters_Thousands/repeats:2_median %console_report " - "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k " - "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k " - "t4_1048576Base1024=1024k$"}, - {"^BM_Counters_Thousands/repeats:2_stddev %console_time_only_report [ " - "]*2 t0_1000000DefaultBase=0 t1_1000000Base1000=0 " - "t2_1000000Base1024=0 t3_1048576Base1000=0 t4_1048576Base1024=0$"}, - }); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"}, - {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 2,$", MR_Next}, - {"\"repetition_index\": 0,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - 
{"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, - {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, - {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, - {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next}, - {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"}, - {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, - {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 2,$", MR_Next}, - {"\"repetition_index\": 1,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, - {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, - {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, - {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next}, - {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_Counters_Thousands/repeats:2_mean\",$"}, - {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 2,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"mean\",$", MR_Next}, - {"\"iterations\": 2,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, - {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, - {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, - {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next}, - {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"}, - {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 2,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"median\",$", MR_Next}, - {"\"iterations\": 2,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, - {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, - {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, - {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next}, - {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"}, - {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": 2,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"stddev\",$", MR_Next}, - {"\"iterations\": 2,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"t0_1000000DefaultBase\": 0\\.(0)*e\\+(0)*,$", MR_Next}, - {"\"t1_1000000Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next}, - {"\"t2_1000000Base1024\": 0\\.(0)*e\\+(0)*,$", MR_Next}, - {"\"t3_1048576Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next}, - {"\"t4_1048576Base1024\": 0\\.(0)*e\\+(0)*$", MR_Next}, - {"}", MR_Next}}); - -ADD_CASES( - 
TC_CSVOut, - {{"^\"BM_Counters_Thousands/" - "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+(" - "0)*6,1\\.04858e\\+(0)*6$"}, - {"^\"BM_Counters_Thousands/" - "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+(" - "0)*6,1\\.04858e\\+(0)*6$"}, - {"^\"BM_Counters_Thousands/" - "repeats:2_mean\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\." - "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"}, - {"^\"BM_Counters_Thousands/" - "repeats:2_median\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\." - "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"}, - {"^\"BM_Counters_Thousands/repeats:2_stddev\",%csv_report,0,0,0,0,0$"}}); -// VS2013 does not allow this function to be passed as a lambda argument -// to CHECK_BENCHMARK_RESULTS() -void CheckThousands(Results const& e) { - if (e.name != "BM_Counters_Thousands/repeats:2") - return; // Do not check the aggregates! - - // check that the values are within 0.01% of the expected values - CHECK_FLOAT_COUNTER_VALUE(e, "t0_1000000DefaultBase", EQ, 1000 * 1000, - 0.0001); - CHECK_FLOAT_COUNTER_VALUE(e, "t1_1000000Base1000", EQ, 1000 * 1000, 0.0001); - CHECK_FLOAT_COUNTER_VALUE(e, "t2_1000000Base1024", EQ, 1000 * 1000, 0.0001); - CHECK_FLOAT_COUNTER_VALUE(e, "t3_1048576Base1000", EQ, 1024 * 1024, 0.0001); - CHECK_FLOAT_COUNTER_VALUE(e, "t4_1048576Base1024", EQ, 1024 * 1024, 0.0001); -} -CHECK_BENCHMARK_RESULTS("BM_Counters_Thousands", &CheckThousands); - -// ========================================================================= // -// --------------------------- TEST CASES END ------------------------------ // -// ========================================================================= // - -int main(int argc, char* argv[]) { RunOutputTests(argc, argv); } diff --git a/benchmarks/thirdparty/benchmark/tools/compare.py b/benchmarks/thirdparty/benchmark/tools/compare.py deleted file mode 100755 index bd01be57cd..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/compare.py +++ /dev/null @@ -1,416 +0,0 @@ -#!/usr/bin/env python - -import unittest -""" -compare.py - versatile benchmark output compare tool -""" - -import argparse -from argparse import ArgumentParser -import sys -import gbench -from gbench import util, report -from gbench.util import * - - -def check_inputs(in1, in2, flags): - """ - Perform checking on the user provided inputs and diagnose any abnormalities - """ - in1_kind, in1_err = classify_input_file(in1) - in2_kind, in2_err = classify_input_file(in2) - output_file = find_benchmark_flag('--benchmark_out=', flags) - output_type = find_benchmark_flag('--benchmark_out_format=', flags) - if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file: - print(("WARNING: '--benchmark_out=%s' will be passed to both " - "benchmarks causing it to be overwritten") % output_file) - if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0: - print("WARNING: passing optional flags has no effect since both " - "inputs are JSON") - if output_type is not None and output_type != 'json': - print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`" - " is not supported.") % output_type) - sys.exit(1) - - -def create_parser(): - parser = ArgumentParser( - description='versatile benchmark output compare tool') - - parser.add_argument( - '-a', - '--display_aggregates_only', - dest='display_aggregates_only', - action="store_true", - help="If there are repetitions, by default, we display everything - the" - " actual runs, and the aggregates computed. 
Sometimes, it is " - "desirable to only view the aggregates. E.g. when there are a lot " - "of repetitions. Do note that only the display is affected. " - "Internally, all the actual runs are still used, e.g. for U test.") - - parser.add_argument( - '--no-color', - dest='color', - default=True, - action="store_false", - help="Do not use colors in the terminal output" - ) - - utest = parser.add_argument_group() - utest.add_argument( - '--no-utest', - dest='utest', - default=True, - action="store_false", - help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS)) - alpha_default = 0.05 - utest.add_argument( - "--alpha", - dest='utest_alpha', - default=alpha_default, - type=float, - help=("significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") % - alpha_default) - - subparsers = parser.add_subparsers( - help='This tool has multiple modes of operation:', - dest='mode') - - parser_a = subparsers.add_parser( - 'benchmarks', - help='The most simple use-case, compare all the output of these two benchmarks') - baseline = parser_a.add_argument_group( - 'baseline', 'The benchmark baseline') - baseline.add_argument( - 'test_baseline', - metavar='test_baseline', - type=argparse.FileType('r'), - nargs=1, - help='A benchmark executable or JSON output file') - contender = parser_a.add_argument_group( - 'contender', 'The benchmark that will be compared against the baseline') - contender.add_argument( - 'test_contender', - metavar='test_contender', - type=argparse.FileType('r'), - nargs=1, - help='A benchmark executable or JSON output file') - parser_a.add_argument( - 'benchmark_options', - metavar='benchmark_options', - nargs=argparse.REMAINDER, - help='Arguments to pass when running benchmark executables') - - parser_b = subparsers.add_parser( - 'filters', help='Compare filter one with the filter two of benchmark') - baseline = parser_b.add_argument_group( - 'baseline', 'The benchmark baseline') - baseline.add_argument( - 'test', - metavar='test', - type=argparse.FileType('r'), - nargs=1, - help='A benchmark executable or JSON output file') - baseline.add_argument( - 'filter_baseline', - metavar='filter_baseline', - type=str, - nargs=1, - help='The first filter, that will be used as baseline') - contender = parser_b.add_argument_group( - 'contender', 'The benchmark that will be compared against the baseline') - contender.add_argument( - 'filter_contender', - metavar='filter_contender', - type=str, - nargs=1, - help='The second filter, that will be compared against the baseline') - parser_b.add_argument( - 'benchmark_options', - metavar='benchmark_options', - nargs=argparse.REMAINDER, - help='Arguments to pass when running benchmark executables') - - parser_c = subparsers.add_parser( - 'benchmarksfiltered', - help='Compare filter one of first benchmark with filter two of the second benchmark') - baseline = parser_c.add_argument_group( - 'baseline', 'The benchmark baseline') - baseline.add_argument( - 'test_baseline', - 
metavar='test_baseline', - type=argparse.FileType('r'), - nargs=1, - help='A benchmark executable or JSON output file') - baseline.add_argument( - 'filter_baseline', - metavar='filter_baseline', - type=str, - nargs=1, - help='The first filter, that will be used as baseline') - contender = parser_c.add_argument_group( - 'contender', 'The benchmark that will be compared against the baseline') - contender.add_argument( - 'test_contender', - metavar='test_contender', - type=argparse.FileType('r'), - nargs=1, - help='The second benchmark executable or JSON output file, that will be compared against the baseline') - contender.add_argument( - 'filter_contender', - metavar='filter_contender', - type=str, - nargs=1, - help='The second filter, that will be compared against the baseline') - parser_c.add_argument( - 'benchmark_options', - metavar='benchmark_options', - nargs=argparse.REMAINDER, - help='Arguments to pass when running benchmark executables') - - return parser - - -def main(): - # Parse the command line flags - parser = create_parser() - args, unknown_args = parser.parse_known_args() - if args.mode is None: - parser.print_help() - exit(1) - assert not unknown_args - benchmark_options = args.benchmark_options - - if args.mode == 'benchmarks': - test_baseline = args.test_baseline[0].name - test_contender = args.test_contender[0].name - filter_baseline = '' - filter_contender = '' - - # NOTE: if test_baseline == test_contender, you are analyzing the stdev - - description = 'Comparing %s to %s' % (test_baseline, test_contender) - elif args.mode == 'filters': - test_baseline = args.test[0].name - test_contender = args.test[0].name - filter_baseline = args.filter_baseline[0] - filter_contender = args.filter_contender[0] - - # NOTE: if filter_baseline == filter_contender, you are analyzing the - # stdev - - description = 'Comparing %s to %s (from %s)' % ( - filter_baseline, filter_contender, args.test[0].name) - elif args.mode == 'benchmarksfiltered': - test_baseline = args.test_baseline[0].name - test_contender = args.test_contender[0].name - filter_baseline = args.filter_baseline[0] - filter_contender = args.filter_contender[0] - - # NOTE: if test_baseline == test_contender and - # filter_baseline == filter_contender, you are analyzing the stdev - - description = 'Comparing %s (from %s) to %s (from %s)' % ( - filter_baseline, test_baseline, filter_contender, test_contender) - else: - # should never happen - print("Unrecognized mode of operation: '%s'" % args.mode) - parser.print_help() - exit(1) - - check_inputs(test_baseline, test_contender, benchmark_options) - - if args.display_aggregates_only: - benchmark_options += ['--benchmark_display_aggregates_only=true'] - - options_baseline = [] - options_contender = [] - - if filter_baseline and filter_contender: - options_baseline = ['--benchmark_filter=%s' % filter_baseline] - options_contender = ['--benchmark_filter=%s' % filter_contender] - - # Run the benchmarks and report the results - json1 = json1_orig = gbench.util.run_or_load_benchmark( - test_baseline, benchmark_options + options_baseline) - json2 = json2_orig = gbench.util.run_or_load_benchmark( - test_contender, benchmark_options + options_contender) - - # Now, filter the benchmarks so that the difference report can work - if filter_baseline and filter_contender: - replacement = '[%s vs. 
%s]' % (filter_baseline, filter_contender) - json1 = gbench.report.filter_benchmark( - json1_orig, filter_baseline, replacement) - json2 = gbench.report.filter_benchmark( - json2_orig, filter_contender, replacement) - - # Diff and output - output_lines = gbench.report.generate_difference_report( - json1, json2, args.display_aggregates_only, - args.utest, args.utest_alpha, args.color) - print(description) - for ln in output_lines: - print(ln) - - -class TestParser(unittest.TestCase): - def setUp(self): - self.parser = create_parser() - testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'gbench', - 'Inputs') - self.testInput0 = os.path.join(testInputs, 'test1_run1.json') - self.testInput1 = os.path.join(testInputs, 'test1_run2.json') - - def test_benchmarks_basic(self): - parsed = self.parser.parse_args( - ['benchmarks', self.testInput0, self.testInput1]) - self.assertFalse(parsed.display_aggregates_only) - self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarks') - self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertFalse(parsed.benchmark_options) - - def test_benchmarks_basic_without_utest(self): - parsed = self.parser.parse_args( - ['--no-utest', 'benchmarks', self.testInput0, self.testInput1]) - self.assertFalse(parsed.display_aggregates_only) - self.assertFalse(parsed.utest) - self.assertEqual(parsed.utest_alpha, 0.05) - self.assertEqual(parsed.mode, 'benchmarks') - self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertFalse(parsed.benchmark_options) - - def test_benchmarks_basic_display_aggregates_only(self): - parsed = self.parser.parse_args( - ['-a', 'benchmarks', self.testInput0, self.testInput1]) - self.assertTrue(parsed.display_aggregates_only) - self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarks') - self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertFalse(parsed.benchmark_options) - - def test_benchmarks_basic_with_utest_alpha(self): - parsed = self.parser.parse_args( - ['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1]) - self.assertFalse(parsed.display_aggregates_only) - self.assertTrue(parsed.utest) - self.assertEqual(parsed.utest_alpha, 0.314) - self.assertEqual(parsed.mode, 'benchmarks') - self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertFalse(parsed.benchmark_options) - - def test_benchmarks_basic_without_utest_with_utest_alpha(self): - parsed = self.parser.parse_args( - ['--no-utest', '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1]) - self.assertFalse(parsed.display_aggregates_only) - self.assertFalse(parsed.utest) - self.assertEqual(parsed.utest_alpha, 0.314) - self.assertEqual(parsed.mode, 'benchmarks') - self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertFalse(parsed.benchmark_options) - - def test_benchmarks_with_remainder(self): - parsed = self.parser.parse_args( - ['benchmarks', self.testInput0, self.testInput1, 'd']) - self.assertFalse(parsed.display_aggregates_only) - self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarks') - self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - 
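# [Editor's aside -- not part of the deleted file.] The remainder tests here
# rely on argparse.REMAINDER scooping every trailing token into
# benchmark_options so it can be forwarded to the benchmark binary. A minimal,
# self-contained sketch of that behaviour (deliberately simplified: plain
# strings instead of argparse.FileType, and hypothetical file names):
import argparse

sketch = argparse.ArgumentParser()
modes = sketch.add_subparsers(dest='mode')
bench = modes.add_parser('benchmarks')
bench.add_argument('test_baseline', nargs=1)
bench.add_argument('test_contender', nargs=1)
bench.add_argument('benchmark_options', nargs=argparse.REMAINDER)

args = sketch.parse_args(['benchmarks', 'run1.json', 'run2.json', 'd'])
print(args.benchmark_options)  # ['d'] -- trailing tokens pass through untouched
# (and, as the *_after_doubleminus tests around this point assert, a leading
# '--' separator is swallowed, so ['--', 'e'] arrives as ['e'])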
self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.benchmark_options, ['d']) - - def test_benchmarks_with_remainder_after_doubleminus(self): - parsed = self.parser.parse_args( - ['benchmarks', self.testInput0, self.testInput1, '--', 'e']) - self.assertFalse(parsed.display_aggregates_only) - self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarks') - self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.benchmark_options, ['e']) - - def test_filters_basic(self): - parsed = self.parser.parse_args( - ['filters', self.testInput0, 'c', 'd']) - self.assertFalse(parsed.display_aggregates_only) - self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'filters') - self.assertEqual(parsed.test[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.filter_contender[0], 'd') - self.assertFalse(parsed.benchmark_options) - - def test_filters_with_remainder(self): - parsed = self.parser.parse_args( - ['filters', self.testInput0, 'c', 'd', 'e']) - self.assertFalse(parsed.display_aggregates_only) - self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'filters') - self.assertEqual(parsed.test[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.filter_contender[0], 'd') - self.assertEqual(parsed.benchmark_options, ['e']) - - def test_filters_with_remainder_after_doubleminus(self): - parsed = self.parser.parse_args( - ['filters', self.testInput0, 'c', 'd', '--', 'f']) - self.assertFalse(parsed.display_aggregates_only) - self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'filters') - self.assertEqual(parsed.test[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.filter_contender[0], 'd') - self.assertEqual(parsed.benchmark_options, ['f']) - - def test_benchmarksfiltered_basic(self): - parsed = self.parser.parse_args( - ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e']) - self.assertFalse(parsed.display_aggregates_only) - self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarksfiltered') - self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.filter_contender[0], 'e') - self.assertFalse(parsed.benchmark_options) - - def test_benchmarksfiltered_with_remainder(self): - parsed = self.parser.parse_args( - ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f']) - self.assertFalse(parsed.display_aggregates_only) - self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarksfiltered') - self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.filter_contender[0], 'e') - self.assertEqual(parsed.benchmark_options[0], 'f') - - def test_benchmarksfiltered_with_remainder_after_doubleminus(self): - parsed = self.parser.parse_args( - ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g']) - self.assertFalse(parsed.display_aggregates_only) - self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarksfiltered') - self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - 
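# [Editor's aside -- not part of the deleted file.] The filters and
# benchmarksfiltered modes exercised in these tests lean on
# gbench.report.filter_benchmark (removed further down in this patch): keep
# only benchmarks whose name matches a regex "family" and rewrite the matched
# part so baseline and contender end up under one shared name. A rough,
# self-contained equivalent (filter_family is an illustrative name):
import copy
import re

def filter_family(results, family, replacement=''):
    regex = re.compile(family)
    kept = {'benchmarks': []}
    for bench in results['benchmarks']:
        if not regex.search(bench['name']):
            continue
        renamed = copy.deepcopy(bench)  # never mutate the original run data
        renamed['name'] = regex.sub(replacement, renamed['name'])
        kept['benchmarks'].append(renamed)
    return kept

runs = {'benchmarks': [{'name': 'BM_Zero'}, {'name': 'BM_One'}]}
print(filter_family(runs, 'BM_Z.ro', '.'))  # {'benchmarks': [{'name': '.'}]}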
self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.filter_contender[0], 'e') - self.assertEqual(parsed.benchmark_options[0], 'g') - - -if __name__ == '__main__': - # unittest.main() - main() - -# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 -# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; -# kate: indent-mode python; remove-trailing-spaces modified; diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run1.json b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run1.json deleted file mode 100755 index 601e327aef..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run1.json +++ /dev/null @@ -1,119 +0,0 @@ -{ - "context": { - "date": "2016-08-02 17:44:46", - "num_cpus": 4, - "mhz_per_cpu": 4228, - "cpu_scaling_enabled": false, - "library_build_type": "release" - }, - "benchmarks": [ - { - "name": "BM_SameTimes", - "iterations": 1000, - "real_time": 10, - "cpu_time": 10, - "time_unit": "ns" - }, - { - "name": "BM_2xFaster", - "iterations": 1000, - "real_time": 50, - "cpu_time": 50, - "time_unit": "ns" - }, - { - "name": "BM_2xSlower", - "iterations": 1000, - "real_time": 50, - "cpu_time": 50, - "time_unit": "ns" - }, - { - "name": "BM_1PercentFaster", - "iterations": 1000, - "real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_1PercentSlower", - "iterations": 1000, - "real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_10PercentFaster", - "iterations": 1000, - "real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_10PercentSlower", - "iterations": 1000, - "real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_100xSlower", - "iterations": 1000, - "real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_100xFaster", - "iterations": 1000, - "real_time": 10000, - "cpu_time": 10000, - "time_unit": "ns" - }, - { - "name": "BM_10PercentCPUToTime", - "iterations": 1000, - "real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_ThirdFaster", - "iterations": 1000, - "real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "MyComplexityTest_BigO", - "run_name": "MyComplexityTest", - "run_type": "aggregate", - "aggregate_name": "BigO", - "cpu_coefficient": 4.2749856294592886e+00, - "real_coefficient": 6.4789275289789780e+00, - "big_o": "N", - "time_unit": "ns" - }, - { - "name": "MyComplexityTest_RMS", - "run_name": "MyComplexityTest", - "run_type": "aggregate", - "aggregate_name": "RMS", - "rms": 4.5097802512472874e-03 - }, - { - "name": "BM_NotBadTimeUnit", - "iterations": 1000, - "real_time": 0.4, - "cpu_time": 0.5, - "time_unit": "s" - }, - { - "name": "BM_DifferentTimeUnit", - "iterations": 1, - "real_time": 1, - "cpu_time": 1, - "time_unit": "s" - } - ] -} diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run2.json b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run2.json deleted file mode 100755 index 3cbcf39b0c..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run2.json +++ /dev/null @@ -1,119 +0,0 @@ -{ - "context": { - "date": "2016-08-02 17:44:46", - "num_cpus": 4, - "mhz_per_cpu": 4228, - "cpu_scaling_enabled": false, - "library_build_type": "release" - }, - "benchmarks": [ - { - "name": "BM_SameTimes", - "iterations": 1000, - "real_time": 10, - "cpu_time": 10, - "time_unit": "ns" - }, - { - 
"name": "BM_2xFaster", - "iterations": 1000, - "real_time": 25, - "cpu_time": 25, - "time_unit": "ns" - }, - { - "name": "BM_2xSlower", - "iterations": 20833333, - "real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_1PercentFaster", - "iterations": 1000, - "real_time": 98.9999999, - "cpu_time": 98.9999999, - "time_unit": "ns" - }, - { - "name": "BM_1PercentSlower", - "iterations": 1000, - "real_time": 100.9999999, - "cpu_time": 100.9999999, - "time_unit": "ns" - }, - { - "name": "BM_10PercentFaster", - "iterations": 1000, - "real_time": 90, - "cpu_time": 90, - "time_unit": "ns" - }, - { - "name": "BM_10PercentSlower", - "iterations": 1000, - "real_time": 110, - "cpu_time": 110, - "time_unit": "ns" - }, - { - "name": "BM_100xSlower", - "iterations": 1000, - "real_time": 1.0000e+04, - "cpu_time": 1.0000e+04, - "time_unit": "ns" - }, - { - "name": "BM_100xFaster", - "iterations": 1000, - "real_time": 100, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_10PercentCPUToTime", - "iterations": 1000, - "real_time": 110, - "cpu_time": 90, - "time_unit": "ns" - }, - { - "name": "BM_ThirdFaster", - "iterations": 1000, - "real_time": 66.665, - "cpu_time": 66.664, - "time_unit": "ns" - }, - { - "name": "MyComplexityTest_BigO", - "run_name": "MyComplexityTest", - "run_type": "aggregate", - "aggregate_name": "BigO", - "cpu_coefficient": 5.6215779594361486e+00, - "real_coefficient": 5.6288314793554610e+00, - "big_o": "N", - "time_unit": "ns" - }, - { - "name": "MyComplexityTest_RMS", - "run_name": "MyComplexityTest", - "run_type": "aggregate", - "aggregate_name": "RMS", - "rms": 3.3128901852342174e-03 - }, - { - "name": "BM_NotBadTimeUnit", - "iterations": 1000, - "real_time": 0.04, - "cpu_time": 0.6, - "time_unit": "s" - }, - { - "name": "BM_DifferentTimeUnit", - "iterations": 1, - "real_time": 1, - "cpu_time": 1, - "time_unit": "ns" - } - ] -} diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test2_run.json b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test2_run.json deleted file mode 100755 index 15bc698030..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test2_run.json +++ /dev/null @@ -1,81 +0,0 @@ -{ - "context": { - "date": "2016-08-02 17:44:46", - "num_cpus": 4, - "mhz_per_cpu": 4228, - "cpu_scaling_enabled": false, - "library_build_type": "release" - }, - "benchmarks": [ - { - "name": "BM_Hi", - "iterations": 1234, - "real_time": 42, - "cpu_time": 24, - "time_unit": "ms" - }, - { - "name": "BM_Zero", - "iterations": 1000, - "real_time": 10, - "cpu_time": 10, - "time_unit": "ns" - }, - { - "name": "BM_Zero/4", - "iterations": 4000, - "real_time": 40, - "cpu_time": 40, - "time_unit": "ns" - }, - { - "name": "Prefix/BM_Zero", - "iterations": 2000, - "real_time": 20, - "cpu_time": 20, - "time_unit": "ns" - }, - { - "name": "Prefix/BM_Zero/3", - "iterations": 3000, - "real_time": 30, - "cpu_time": 30, - "time_unit": "ns" - }, - { - "name": "BM_One", - "iterations": 5000, - "real_time": 5, - "cpu_time": 5, - "time_unit": "ns" - }, - { - "name": "BM_One/4", - "iterations": 2000, - "real_time": 20, - "cpu_time": 20, - "time_unit": "ns" - }, - { - "name": "Prefix/BM_One", - "iterations": 1000, - "real_time": 10, - "cpu_time": 10, - "time_unit": "ns" - }, - { - "name": "Prefix/BM_One/3", - "iterations": 1500, - "real_time": 15, - "cpu_time": 15, - "time_unit": "ns" - }, - { - "name": "BM_Bye", - "iterations": 5321, - "real_time": 11, - "cpu_time": 63, - "time_unit": "ns" - } - ] -} diff --git 
a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test3_run0.json b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test3_run0.json deleted file mode 100755 index 49f8b06143..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test3_run0.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "context": { - "date": "2016-08-02 17:44:46", - "num_cpus": 4, - "mhz_per_cpu": 4228, - "cpu_scaling_enabled": false, - "library_build_type": "release" - }, - "benchmarks": [ - { - "name": "BM_One", - "run_type": "aggregate", - "iterations": 1000, - "real_time": 10, - "cpu_time": 100, - "time_unit": "ns" - }, - { - "name": "BM_Two", - "iterations": 1000, - "real_time": 9, - "cpu_time": 90, - "time_unit": "ns" - }, - { - "name": "BM_Two", - "iterations": 1000, - "real_time": 8, - "cpu_time": 86, - "time_unit": "ns" - }, - { - "name": "short", - "run_type": "aggregate", - "iterations": 1000, - "real_time": 8, - "cpu_time": 80, - "time_unit": "ns" - }, - { - "name": "short", - "run_type": "aggregate", - "iterations": 1000, - "real_time": 8, - "cpu_time": 77, - "time_unit": "ns" - }, - { - "name": "medium", - "run_type": "iteration", - "iterations": 1000, - "real_time": 8, - "cpu_time": 80, - "time_unit": "ns" - }, - { - "name": "medium", - "run_type": "iteration", - "iterations": 1000, - "real_time": 9, - "cpu_time": 82, - "time_unit": "ns" - } - ] -} diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test3_run1.json b/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test3_run1.json deleted file mode 100755 index acc5ba17ae..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test3_run1.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "context": { - "date": "2016-08-02 17:44:46", - "num_cpus": 4, - "mhz_per_cpu": 4228, - "cpu_scaling_enabled": false, - "library_build_type": "release" - }, - "benchmarks": [ - { - "name": "BM_One", - "iterations": 1000, - "real_time": 9, - "cpu_time": 110, - "time_unit": "ns" - }, - { - "name": "BM_Two", - "run_type": "aggregate", - "iterations": 1000, - "real_time": 10, - "cpu_time": 89, - "time_unit": "ns" - }, - { - "name": "BM_Two", - "iterations": 1000, - "real_time": 7, - "cpu_time": 72, - "time_unit": "ns" - }, - { - "name": "short", - "run_type": "aggregate", - "iterations": 1000, - "real_time": 7, - "cpu_time": 75, - "time_unit": "ns" - }, - { - "name": "short", - "run_type": "aggregate", - "iterations": 762, - "real_time": 4.54, - "cpu_time": 66.6, - "time_unit": "ns" - }, - { - "name": "short", - "run_type": "iteration", - "iterations": 1000, - "real_time": 800, - "cpu_time": 1, - "time_unit": "ns" - }, - { - "name": "medium", - "run_type": "iteration", - "iterations": 1200, - "real_time": 5, - "cpu_time": 53, - "time_unit": "ns" - } - ] -} diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/__init__.py b/benchmarks/thirdparty/benchmark/tools/gbench/__init__.py deleted file mode 100755 index fce1a1acfb..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/gbench/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Google Benchmark tooling""" - -__author__ = 'Eric Fiselier' -__email__ = 'eric@efcs.ca' -__versioninfo__ = (0, 5, 0) -__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev' - -__all__ = [] diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/report.py b/benchmarks/thirdparty/benchmark/tools/gbench/report.py deleted file mode 100755 index 5bd3a8d85d..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/gbench/report.py +++ /dev/null @@ -1,541 +0,0 @@ -import unittest -"""report.py - Utilities for 
reporting statistics about benchmark results -""" -import os -import re -import copy - -from scipy.stats import mannwhitneyu - - -class BenchmarkColor(object): - def __init__(self, name, code): - self.name = name - self.code = code - - def __repr__(self): - return '%s%r' % (self.__class__.__name__, - (self.name, self.code)) - - def __format__(self, format): - return self.code - - -# Benchmark Colors Enumeration -BC_NONE = BenchmarkColor('NONE', '') -BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m') -BC_CYAN = BenchmarkColor('CYAN', '\033[96m') -BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m') -BC_OKGREEN = BenchmarkColor('OKGREEN', '\033[32m') -BC_HEADER = BenchmarkColor('HEADER', '\033[92m') -BC_WARNING = BenchmarkColor('WARNING', '\033[93m') -BC_WHITE = BenchmarkColor('WHITE', '\033[97m') -BC_FAIL = BenchmarkColor('FAIL', '\033[91m') -BC_ENDC = BenchmarkColor('ENDC', '\033[0m') -BC_BOLD = BenchmarkColor('BOLD', '\033[1m') -BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m') - -UTEST_MIN_REPETITIONS = 2 -UTEST_OPTIMAL_REPETITIONS = 9 # Lowest reasonable number, More is better. -UTEST_COL_NAME = "_pvalue" - - -def color_format(use_color, fmt_str, *args, **kwargs): - """ - Return the result of 'fmt_str.format(*args, **kwargs)' after transforming - 'args' and 'kwargs' according to the value of 'use_color'. If 'use_color' - is False then all color codes in 'args' and 'kwargs' are replaced with - the empty string. - """ - assert use_color is True or use_color is False - if not use_color: - args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE - for arg in args] - kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE - for key, arg in kwargs.items()} - return fmt_str.format(*args, **kwargs) - - -def find_longest_name(benchmark_list): - """ - Return the length of the longest benchmark name in a given list of - benchmark JSON objects - """ - longest_name = 1 - for bc in benchmark_list: - if len(bc['name']) > longest_name: - longest_name = len(bc['name']) - return longest_name - - -def calculate_change(old_val, new_val): - """ - Return a float representing the decimal change between old_val and new_val. - """ - if old_val == 0 and new_val == 0: - return 0.0 - if old_val == 0: - return float(new_val - old_val) / (float(old_val + new_val) / 2) - return float(new_val - old_val) / abs(old_val) - - -def filter_benchmark(json_orig, family, replacement=""): - """ - Apply a filter to the json, and only leave the 'family' of benchmarks. - """ - regex = re.compile(family) - filtered = {} - filtered['benchmarks'] = [] - for be in json_orig['benchmarks']: - if not regex.search(be['name']): - continue - filteredbench = copy.deepcopy(be) # Do NOT modify the old name! - filteredbench['name'] = regex.sub(replacement, filteredbench['name']) - filtered['benchmarks'].append(filteredbench) - return filtered - - -def get_unique_benchmark_names(json): - """ - While *keeping* the order, give all the unique 'names' used for benchmarks. - """ - seen = set() - uniqued = [x['name'] for x in json['benchmarks'] - if x['name'] not in seen and - (seen.add(x['name']) or True)] - return uniqued - - -def intersect(list1, list2): - """ - Given two lists, get a new list consisting of the elements only contained - in *both of the input lists*, while preserving the ordering. 
- """ - return [x for x in list1 if x in list2] - - -def is_potentially_comparable_benchmark(x): - return ('time_unit' in x and 'real_time' in x and 'cpu_time' in x) - - -def partition_benchmarks(json1, json2): - """ - While preserving the ordering, find benchmarks with the same names in - both of the inputs, and group them. - (i.e. partition/filter into groups with common name) - """ - json1_unique_names = get_unique_benchmark_names(json1) - json2_unique_names = get_unique_benchmark_names(json2) - names = intersect(json1_unique_names, json2_unique_names) - partitions = [] - for name in names: - time_unit = None - # Pick the time unit from the first entry of the lhs benchmark. - # We should be careful not to crash with unexpected input. - for x in json1['benchmarks']: - if (x['name'] == name and is_potentially_comparable_benchmark(x)): - time_unit = x['time_unit'] - break - if time_unit is None: - continue - # Filter by name and time unit. - # All the repetitions are assumed to be comparable. - lhs = [x for x in json1['benchmarks'] if x['name'] == name and - x['time_unit'] == time_unit] - rhs = [x for x in json2['benchmarks'] if x['name'] == name and - x['time_unit'] == time_unit] - partitions.append([lhs, rhs]) - return partitions - - -def extract_field(partition, field_name): - # The count of elements may be different. We want *all* of them. - lhs = [x[field_name] for x in partition[0]] - rhs = [x[field_name] for x in partition[1]] - return [lhs, rhs] - -def calc_utest(timings_cpu, timings_time): - min_rep_cnt = min(len(timings_time[0]), - len(timings_time[1]), - len(timings_cpu[0]), - len(timings_cpu[1])) - - # Does *everything* has at least UTEST_MIN_REPETITIONS repetitions? - if min_rep_cnt < UTEST_MIN_REPETITIONS: - return False, None, None - - time_pvalue = mannwhitneyu( - timings_time[0], timings_time[1], alternative='two-sided').pvalue - cpu_pvalue = mannwhitneyu( - timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue - - return (min_rep_cnt >= UTEST_OPTIMAL_REPETITIONS), cpu_pvalue, time_pvalue - -def print_utest(partition, utest_alpha, first_col_width, use_color=True): - def get_utest_color(pval): - return BC_FAIL if pval >= utest_alpha else BC_OKGREEN - - timings_time = extract_field(partition, 'real_time') - timings_cpu = extract_field(partition, 'cpu_time') - have_optimal_repetitions, cpu_pvalue, time_pvalue = calc_utest(timings_cpu, timings_time) - - # Check if we failed miserably with minimum required repetitions for utest - if not have_optimal_repetitions and cpu_pvalue is None and time_pvalue is None: - return [] - - dsc = "U Test, Repetitions: {} vs {}".format( - len(timings_cpu[0]), len(timings_cpu[1])) - dsc_color = BC_OKGREEN - - # We still got some results to show but issue a warning about it. - if not have_optimal_repetitions: - dsc_color = BC_WARNING - dsc += ". WARNING: Results unreliable! 
{}+ repetitions recommended.".format( - UTEST_OPTIMAL_REPETITIONS) - - special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}" - - last_name = partition[0][0]['name'] - return [color_format(use_color, - special_str, - BC_HEADER, - "{}{}".format(last_name, UTEST_COL_NAME), - first_col_width, - get_utest_color(time_pvalue), time_pvalue, - get_utest_color(cpu_pvalue), cpu_pvalue, - dsc_color, dsc, - endc=BC_ENDC)] - - -def generate_difference_report( - json1, - json2, - display_aggregates_only=False, - utest=False, - utest_alpha=0.05, - use_color=True): - """ - Calculate and report the difference between each test of two benchmarks - runs specified as 'json1' and 'json2'. - """ - assert utest is True or utest is False - first_col_width = find_longest_name(json1['benchmarks']) - - def find_test(name): - for b in json2['benchmarks']: - if b['name'] == name: - return b - return None - - first_col_width = max( - first_col_width, - len('Benchmark')) - first_col_width += len(UTEST_COL_NAME) - first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format( - 'Benchmark', 12 + first_col_width) - output_strs = [first_line, '-' * len(first_line)] - - partitions = partition_benchmarks(json1, json2) - for partition in partitions: - # Careful, we may have different repetition count. - for i in range(min(len(partition[0]), len(partition[1]))): - bn = partition[0][i] - other_bench = partition[1][i] - - # *If* we were asked to only display aggregates, - # and if it is non-aggregate, then skip it. - if display_aggregates_only and 'run_type' in bn and 'run_type' in other_bench: - assert bn['run_type'] == other_bench['run_type'] - if bn['run_type'] != 'aggregate': - continue - - fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}" - - def get_color(res): - if res > 0.05: - return BC_FAIL - elif res > -0.07: - return BC_WHITE - else: - return BC_CYAN - - tres = calculate_change(bn['real_time'], other_bench['real_time']) - cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time']) - output_strs += [color_format(use_color, - fmt_str, - BC_HEADER, - bn['name'], - first_col_width, - get_color(tres), - tres, - get_color(cpures), - cpures, - bn['real_time'], - other_bench['real_time'], - bn['cpu_time'], - other_bench['cpu_time'], - endc=BC_ENDC)] - - # After processing the whole partition, if requested, do the U test. 
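# [Editor's aside -- not part of the deleted file.] A condensed sketch of the
# gating that calc_utest()/print_utest() above implement: below
# UTEST_MIN_REPETITIONS (2) no p-value is produced at all; from 2 up to
# UTEST_OPTIMAL_REPETITIONS (9) a two-sided Mann-Whitney U p-value is reported
# but flagged unreliable. utest_sketch and its parameter names are
# illustrative, not part of the deleted module:
from scipy.stats import mannwhitneyu

def utest_sketch(old_times, new_times, min_reps=2, optimal_reps=9):
    reps = min(len(old_times), len(new_times))
    if reps < min_reps:
        return None, False               # too few repetitions: skip the test
    pvalue = mannwhitneyu(old_times, new_times,
                          alternative='two-sided').pvalue
    return pvalue, reps >= optimal_reps  # p-value plus a reliability flag

print(utest_sketch([8.0, 8.1], [7.0, 7.2]))  # (~0.33, False): shown, but flagged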
- if utest: - output_strs += print_utest(partition, - utest_alpha=utest_alpha, - first_col_width=first_col_width, - use_color=use_color) - - return output_strs - - -############################################################################### -# Unit tests - - -class TestGetUniqueBenchmarkNames(unittest.TestCase): - def load_results(self): - import json - testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'Inputs') - testOutput = os.path.join(testInputs, 'test3_run0.json') - with open(testOutput, 'r') as f: - json = json.load(f) - return json - - def test_basic(self): - expect_lines = [ - 'BM_One', - 'BM_Two', - 'short', # These two are not sorted - 'medium', # These two are not sorted - ] - json = self.load_results() - output_lines = get_unique_benchmark_names(json) - print("\n") - print("\n".join(output_lines)) - self.assertEqual(len(output_lines), len(expect_lines)) - for i in range(0, len(output_lines)): - self.assertEqual(expect_lines[i], output_lines[i]) - - -class TestReportDifference(unittest.TestCase): - def load_results(self): - import json - testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'Inputs') - testOutput1 = os.path.join(testInputs, 'test1_run1.json') - testOutput2 = os.path.join(testInputs, 'test1_run2.json') - with open(testOutput1, 'r') as f: - json1 = json.load(f) - with open(testOutput2, 'r') as f: - json2 = json.load(f) - return json1, json2 - - def test_basic(self): - expect_lines = [ - ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'], - ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'], - ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'], - ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'], - ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'], - ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'], - ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'], - ['BM_100xSlower', '+99.0000', '+99.0000', - '100', '10000', '100', '10000'], - ['BM_100xFaster', '-0.9900', '-0.9900', - '10000', '100', '10000', '100'], - ['BM_10PercentCPUToTime', '+0.1000', - '-0.1000', '100', '110', '100', '90'], - ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'], - ['BM_NotBadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'], - ] - json1, json2 = self.load_results() - output_lines_with_header = generate_difference_report( - json1, json2, use_color=False) - output_lines = output_lines_with_header[2:] - print("\n") - print("\n".join(output_lines_with_header)) - self.assertEqual(len(output_lines), len(expect_lines)) - for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(' ') if x] - self.assertEqual(len(parts), 7) - self.assertEqual(expect_lines[i], parts) - - -class TestReportDifferenceBetweenFamilies(unittest.TestCase): - def load_result(self): - import json - testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'Inputs') - testOutput = os.path.join(testInputs, 'test2_run.json') - with open(testOutput, 'r') as f: - json = json.load(f) - return json - - def test_basic(self): - expect_lines = [ - ['.', '-0.5000', '-0.5000', '10', '5', '10', '5'], - ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'], - ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'], - ['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'], - ] - json = self.load_result() - json1 = filter_benchmark(json, "BM_Z.ro", ".") - json2 = filter_benchmark(json, 
"BM_O.e", ".") - output_lines_with_header = generate_difference_report( - json1, json2, use_color=False) - output_lines = output_lines_with_header[2:] - print("\n") - print("\n".join(output_lines_with_header)) - self.assertEqual(len(output_lines), len(expect_lines)) - for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(' ') if x] - self.assertEqual(len(parts), 7) - self.assertEqual(expect_lines[i], parts) - - -class TestReportDifferenceWithUTest(unittest.TestCase): - def load_results(self): - import json - testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'Inputs') - testOutput1 = os.path.join(testInputs, 'test3_run0.json') - testOutput2 = os.path.join(testInputs, 'test3_run1.json') - with open(testOutput1, 'r') as f: - json1 = json.load(f) - with open(testOutput2, 'r') as f: - json2 = json.load(f) - return json1, json2 - - def test_utest(self): - expect_lines = [] - expect_lines = [ - ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'], - ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'], - ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'], - ['BM_Two_pvalue', - '0.6985', - '0.6985', - 'U', - 'Test,', - 'Repetitions:', - '2', - 'vs', - '2.', - 'WARNING:', - 'Results', - 'unreliable!', - '9+', - 'repetitions', - 'recommended.'], - ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'], - ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'], - ['short_pvalue', - '0.7671', - '0.1489', - 'U', - 'Test,', - 'Repetitions:', - '2', - 'vs', - '3.', - 'WARNING:', - 'Results', - 'unreliable!', - '9+', - 'repetitions', - 'recommended.'], - ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'], - ] - json1, json2 = self.load_results() - output_lines_with_header = generate_difference_report( - json1, json2, utest=True, utest_alpha=0.05, use_color=False) - output_lines = output_lines_with_header[2:] - print("\n") - print("\n".join(output_lines_with_header)) - self.assertEqual(len(output_lines), len(expect_lines)) - for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(' ') if x] - self.assertEqual(expect_lines[i], parts) - - -class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly( - unittest.TestCase): - def load_results(self): - import json - testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'Inputs') - testOutput1 = os.path.join(testInputs, 'test3_run0.json') - testOutput2 = os.path.join(testInputs, 'test3_run1.json') - with open(testOutput1, 'r') as f: - json1 = json.load(f) - with open(testOutput2, 'r') as f: - json2 = json.load(f) - return json1, json2 - - def test_utest(self): - expect_lines = [] - expect_lines = [ - ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'], - ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'], - ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'], - ['BM_Two_pvalue', - '0.6985', - '0.6985', - 'U', - 'Test,', - 'Repetitions:', - '2', - 'vs', - '2.', - 'WARNING:', - 'Results', - 'unreliable!', - '9+', - 'repetitions', - 'recommended.'], - ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'], - ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'], - ['short_pvalue', - '0.7671', - '0.1489', - 'U', - 'Test,', - 'Repetitions:', - '2', - 'vs', - '3.', - 'WARNING:', - 'Results', - 'unreliable!', - '9+', - 'repetitions', - 'recommended.'], - ] - json1, json2 = self.load_results() - output_lines_with_header = generate_difference_report( - json1, json2, display_aggregates_only=True, - utest=True, utest_alpha=0.05, 
use_color=False) - output_lines = output_lines_with_header[2:] - print("\n") - print("\n".join(output_lines_with_header)) - self.assertEqual(len(output_lines), len(expect_lines)) - for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(' ') if x] - self.assertEqual(expect_lines[i], parts) - - -if __name__ == '__main__': - unittest.main() - -# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 -# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; -# kate: indent-mode python; remove-trailing-spaces modified; diff --git a/benchmarks/thirdparty/benchmark/tools/gbench/util.py b/benchmarks/thirdparty/benchmark/tools/gbench/util.py deleted file mode 100755 index 661c4bad8d..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/gbench/util.py +++ /dev/null @@ -1,163 +0,0 @@ -"""util.py - General utilities for running, loading, and processing benchmarks -""" -import json -import os -import tempfile -import subprocess -import sys - -# Input file type enumeration -IT_Invalid = 0 -IT_JSON = 1 -IT_Executable = 2 - -_num_magic_bytes = 2 if sys.platform.startswith('win') else 4 - - -def is_executable_file(filename): - """ - Return 'True' if 'filename' names a valid file which is likely - an executable. A file is considered an executable if it starts with the - magic bytes for a EXE, Mach O, or ELF file. - """ - if not os.path.isfile(filename): - return False - with open(filename, mode='rb') as f: - magic_bytes = f.read(_num_magic_bytes) - if sys.platform == 'darwin': - return magic_bytes in [ - b'\xfe\xed\xfa\xce', # MH_MAGIC - b'\xce\xfa\xed\xfe', # MH_CIGAM - b'\xfe\xed\xfa\xcf', # MH_MAGIC_64 - b'\xcf\xfa\xed\xfe', # MH_CIGAM_64 - b'\xca\xfe\xba\xbe', # FAT_MAGIC - b'\xbe\xba\xfe\xca' # FAT_CIGAM - ] - elif sys.platform.startswith('win'): - return magic_bytes == b'MZ' - else: - return magic_bytes == b'\x7FELF' - - -def is_json_file(filename): - """ - Returns 'True' if 'filename' names a valid JSON output file. - 'False' otherwise. - """ - try: - with open(filename, 'r') as f: - json.load(f) - return True - except BaseException: - pass - return False - - -def classify_input_file(filename): - """ - Return a tuple (type, msg) where 'type' specifies the classified type - of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable - string represeting the error. - """ - ftype = IT_Invalid - err_msg = None - if not os.path.exists(filename): - err_msg = "'%s' does not exist" % filename - elif not os.path.isfile(filename): - err_msg = "'%s' does not name a file" % filename - elif is_executable_file(filename): - ftype = IT_Executable - elif is_json_file(filename): - ftype = IT_JSON - else: - err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename - return ftype, err_msg - - -def check_input_file(filename): - """ - Classify the file named by 'filename' and return the classification. - If the file is classified as 'IT_Invalid' print an error message and exit - the program. - """ - ftype, msg = classify_input_file(filename) - if ftype == IT_Invalid: - print("Invalid input file: %s" % msg) - sys.exit(1) - return ftype - - -def find_benchmark_flag(prefix, benchmark_flags): - """ - Search the specified list of flags for a flag matching `` and - if it is found return the arg it specifies. If specified more than once the - last value is returned. If the flag is not found None is returned. 
- """ - assert prefix.startswith('--') and prefix.endswith('=') - result = None - for f in benchmark_flags: - if f.startswith(prefix): - result = f[len(prefix):] - return result - - -def remove_benchmark_flags(prefix, benchmark_flags): - """ - Return a new list containing the specified benchmark_flags except those - with the specified prefix. - """ - assert prefix.startswith('--') and prefix.endswith('=') - return [f for f in benchmark_flags if not f.startswith(prefix)] - - -def load_benchmark_results(fname): - """ - Read benchmark output from a file and return the JSON object. - REQUIRES: 'fname' names a file containing JSON benchmark output. - """ - with open(fname, 'r') as f: - return json.load(f) - - -def run_benchmark(exe_name, benchmark_flags): - """ - Run a benchmark specified by 'exe_name' with the specified - 'benchmark_flags'. The benchmark is run directly as a subprocess to preserve - real time console output. - RETURNS: A JSON object representing the benchmark output - """ - output_name = find_benchmark_flag('--benchmark_out=', - benchmark_flags) - is_temp_output = False - if output_name is None: - is_temp_output = True - thandle, output_name = tempfile.mkstemp() - os.close(thandle) - benchmark_flags = list(benchmark_flags) + \ - ['--benchmark_out=%s' % output_name] - - cmd = [exe_name] + benchmark_flags - print("RUNNING: %s" % ' '.join(cmd)) - exitCode = subprocess.call(cmd) - if exitCode != 0: - print('TEST FAILED...') - sys.exit(exitCode) - json_res = load_benchmark_results(output_name) - if is_temp_output: - os.unlink(output_name) - return json_res - - -def run_or_load_benchmark(filename, benchmark_flags): - """ - Get the results for a specified benchmark. If 'filename' specifies - an executable benchmark then the results are generated by running the - benchmark. Otherwise 'filename' must name a valid JSON output file, - which is loaded and the result returned. - """ - ftype = check_input_file(filename) - if ftype == IT_JSON: - return load_benchmark_results(filename) - if ftype == IT_Executable: - return run_benchmark(filename, benchmark_flags) - raise ValueError('Unknown file type %s' % ftype) diff --git a/benchmarks/thirdparty/benchmark/tools/requirements.txt b/benchmarks/thirdparty/benchmark/tools/requirements.txt deleted file mode 100755 index 3b3331b5af..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -scipy>=1.5.0 \ No newline at end of file diff --git a/benchmarks/thirdparty/benchmark/tools/strip_asm.py b/benchmarks/thirdparty/benchmark/tools/strip_asm.py deleted file mode 100755 index 9030550b43..0000000000 --- a/benchmarks/thirdparty/benchmark/tools/strip_asm.py +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env python - -""" -strip_asm.py - Cleanup ASM output for the specified file -""" - -from argparse import ArgumentParser -import sys -import os -import re - -def find_used_labels(asm): - found = set() - label_re = re.compile("\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)") - for l in asm.splitlines(): - m = label_re.match(l) - if m: - found.add('.L%s' % m.group(1)) - return found - - -def normalize_labels(asm): - decls = set() - label_decl = re.compile("^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)") - for l in asm.splitlines(): - m = label_decl.match(l) - if m: - decls.add(m.group(0)) - if len(decls) == 0: - return asm - needs_dot = next(iter(decls))[0] != '.' - if not needs_dot: - return asm - for ld in decls: - asm = re.sub("(^|\s+)" + ld + "(?=:|\s)", '\\1.' 
+ ld, asm) - return asm - - -def transform_labels(asm): - asm = normalize_labels(asm) - used_decls = find_used_labels(asm) - new_asm = '' - label_decl = re.compile("^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)") - for l in asm.splitlines(): - m = label_decl.match(l) - if not m or m.group(0) in used_decls: - new_asm += l - new_asm += '\n' - return new_asm - - -def is_identifier(tk): - if len(tk) == 0: - return False - first = tk[0] - if not first.isalpha() and first != '_': - return False - for i in range(1, len(tk)): - c = tk[i] - if not c.isalnum() and c != '_': - return False - return True - -def process_identifiers(l): - """ - process_identifiers - process all identifiers and modify them to have - consistent names across all platforms; specifically across ELF and MachO. - For example, MachO inserts an additional underscore at the beginning of - names. This function removes that. - """ - parts = re.split(r'([a-zA-Z0-9_]+)', l) - new_line = '' - for tk in parts: - if is_identifier(tk): - if tk.startswith('__Z'): - tk = tk[1:] - elif tk.startswith('_') and len(tk) > 1 and \ - tk[1].isalpha() and tk[1] != 'Z': - tk = tk[1:] - new_line += tk - return new_line - - -def process_asm(asm): - """ - Strip the ASM of unwanted directives and lines - """ - new_contents = '' - asm = transform_labels(asm) - - # TODO: Add more things we want to remove - discard_regexes = [ - re.compile("\s+\..*$"), # directive - re.compile("\s*#(NO_APP|APP)$"), #inline ASM - re.compile("\s*#.*$"), # comment line - re.compile("\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"), #global directive - re.compile("\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"), - ] - keep_regexes = [ - - ] - fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:") - for l in asm.splitlines(): - # Remove Mach-O attribute - l = l.replace('@GOTPCREL', '') - add_line = True - for reg in discard_regexes: - if reg.match(l) is not None: - add_line = False - break - for reg in keep_regexes: - if reg.match(l) is not None: - add_line = True - break - if add_line: - if fn_label_def.match(l) and len(new_contents) != 0: - new_contents += '\n' - l = process_identifiers(l) - new_contents += l - new_contents += '\n' - return new_contents - -def main(): - parser = ArgumentParser( - description='generate a stripped assembly file') - parser.add_argument( - 'input', metavar='input', type=str, nargs=1, - help='An input assembly file') - parser.add_argument( - 'out', metavar='output', type=str, nargs=1, - help='The output file') - args, unknown_args = parser.parse_known_args() - input = args.input[0] - output = args.out[0] - if not os.path.isfile(input): - print(("ERROR: input file '%s' does not exist") % input) - sys.exit(1) - contents = None - with open(input, 'r') as f: - contents = f.read() - new_contents = process_asm(contents) - with open(output, 'w') as f: - f.write(new_contents) - - -if __name__ == '__main__': - main() - -# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 -# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; -# kate: indent-mode python; remove-trailing-spaces modified; From 05e2cd4e1e395ebbfe1a93150ef84032ffb00860 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Sat, 23 Jan 2021 14:22:12 +0100 Subject: [PATCH 029/113] :penguin: add target to download a Linux version of CMake --- cmake/ci.cmake | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 45eb8a91f1..105186b12c 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ 
-473,14 +473,27 @@ add_custom_target(ci_benchmarks # CMake flags ############################################################################### -add_custom_command( - OUTPUT cmake-3.1.0-Darwin64 - COMMAND wget https://github.com/Kitware/CMake/releases/download/v3.1.0/cmake-3.1.0-Darwin64.tar.gz - COMMAND tar xfz cmake-3.1.0-Darwin64.tar.gz - COMMAND rm cmake-3.1.0-Darwin64.tar.gz - WORKING_DIRECTORY ${PROJECT_BINARY_DIR} - COMMENT "Download CMake 3.1.0" -) +if (APPLE) + add_custom_command( + OUTPUT cmake-3.1.0 + COMMAND wget https://github.com/Kitware/CMake/releases/download/v3.1.0/cmake-3.1.0-Darwin64.tar.gz + COMMAND tar xfz cmake-3.1.0-Darwin64.tar.gz + COMMAND rm cmake-3.1.0-Darwin64.tar.gz + WORKING_DIRECTORY ${PROJECT_BINARY_DIR} + COMMENT "Download CMake 3.1.0" + ) + set(CMAKE_310_BINARY ${PROJECT_BINARY_DIR}/cmake-3.1.0-Darwin64/CMake.app/Contents/bin/cmake) +else() + add_custom_command( + OUTPUT cmake-3.1.0 + COMMAND wget https://github.com/Kitware/CMake/releases/download/v3.1.0/cmake-3.1.0-Linux-x86_64.tar.gz + COMMAND tar xfz cmake-3.1.0-Linux-x86_64.tar.gz + COMMAND rm cmake-3.1.0-Linux-x86_64.tar.gz + WORKING_DIRECTORY ${PROJECT_BINARY_DIR} + COMMENT "Download CMake 3.1.0" + ) + set(CMAKE_310_BINARY ${PROJECT_BINARY_DIR}/cmake-3.1.0-Linux-x86_64/bin/cmake) +endif() set(JSON_CMAKE_FLAGS "JSON_BuildTests;JSON_Install;JSON_MultipleHeaders;JSON_Sanitizer;JSON_Valgrind;JSON_NoExceptions;JSON_Coverage") @@ -493,8 +506,8 @@ foreach(JSON_CMAKE_FLAG ${JSON_CMAKE_FLAGS}) add_custom_target("${JSON_CMAKE_FLAG_TARGET}_31" COMMENT "Check CMake flag ${JSON_CMAKE_FLAG} (CMake 3.1)" COMMAND mkdir ${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET}_31 - COMMAND cd ${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET}_31 && ${PROJECT_BINARY_DIR}/cmake-3.1.0-Darwin64/CMake.app/Contents/bin/cmake -Werror=dev ${PROJECT_SOURCE_DIR} -D${JSON_CMAKE_FLAG}=ON -DCMAKE_CXX_COMPILE_FEATURES="cxx_range_for" -DCMAKE_CXX_FLAGS="-std=gnu++11" - DEPENDS cmake-3.1.0-Darwin64 + COMMAND cd ${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET}_31 && ${CMAKE_310_BINARY} -Werror=dev ${PROJECT_SOURCE_DIR} -D${JSON_CMAKE_FLAG}=ON -DCMAKE_CXX_COMPILE_FEATURES="cxx_range_for" -DCMAKE_CXX_FLAGS="-std=gnu++11" + DEPENDS cmake-3.1.0 ) list(APPEND JSON_CMAKE_FLAG_TARGETS ${JSON_CMAKE_FLAG_TARGET} ${JSON_CMAKE_FLAG_TARGET}_31) list(APPEND JSON_CMAKE_FLAG_BUILD_DIRS ${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET} ${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET}_31) From 395788c311359147005edca3f5d6b7706911b91d Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Sat, 23 Jan 2021 17:04:17 +0100 Subject: [PATCH 030/113] :hammer: fix dependency --- cmake/ci.cmake | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 105186b12c..36aaa99525 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -474,25 +474,25 @@ add_custom_target(ci_benchmarks ############################################################################### if (APPLE) + set(CMAKE_310_BINARY ${PROJECT_BINARY_DIR}/cmake-3.1.0-Darwin64/CMake.app/Contents/bin/cmake) add_custom_command( - OUTPUT cmake-3.1.0 + OUTPUT ${CMAKE_310_BINARY} COMMAND wget https://github.com/Kitware/CMake/releases/download/v3.1.0/cmake-3.1.0-Darwin64.tar.gz COMMAND tar xfz cmake-3.1.0-Darwin64.tar.gz COMMAND rm cmake-3.1.0-Darwin64.tar.gz WORKING_DIRECTORY ${PROJECT_BINARY_DIR} COMMENT "Download CMake 3.1.0" ) - set(CMAKE_310_BINARY ${PROJECT_BINARY_DIR}/cmake-3.1.0-Darwin64/CMake.app/Contents/bin/cmake) else() + 
set(CMAKE_310_BINARY ${PROJECT_BINARY_DIR}/cmake-3.1.0-Linux-x86_64/bin/cmake) add_custom_command( - OUTPUT cmake-3.1.0 + OUTPUT ${CMAKE_310_BINARY} COMMAND wget https://github.com/Kitware/CMake/releases/download/v3.1.0/cmake-3.1.0-Linux-x86_64.tar.gz COMMAND tar xfz cmake-3.1.0-Linux-x86_64.tar.gz COMMAND rm cmake-3.1.0-Linux-x86_64.tar.gz WORKING_DIRECTORY ${PROJECT_BINARY_DIR} COMMENT "Download CMake 3.1.0" ) - set(CMAKE_310_BINARY ${PROJECT_BINARY_DIR}/cmake-3.1.0-Linux-x86_64/bin/cmake) endif() set(JSON_CMAKE_FLAGS "JSON_BuildTests;JSON_Install;JSON_MultipleHeaders;JSON_Sanitizer;JSON_Valgrind;JSON_NoExceptions;JSON_Coverage") @@ -507,7 +507,7 @@ foreach(JSON_CMAKE_FLAG ${JSON_CMAKE_FLAGS}) COMMENT "Check CMake flag ${JSON_CMAKE_FLAG} (CMake 3.1)" COMMAND mkdir ${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET}_31 COMMAND cd ${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET}_31 && ${CMAKE_310_BINARY} -Werror=dev ${PROJECT_SOURCE_DIR} -D${JSON_CMAKE_FLAG}=ON -DCMAKE_CXX_COMPILE_FEATURES="cxx_range_for" -DCMAKE_CXX_FLAGS="-std=gnu++11" - DEPENDS cmake-3.1.0 + DEPENDS ${CMAKE_310_BINARY} ) list(APPEND JSON_CMAKE_FLAG_TARGETS ${JSON_CMAKE_FLAG_TARGET} ${JSON_CMAKE_FLAG_TARGET}_31) list(APPEND JSON_CMAKE_FLAG_BUILD_DIRS ${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET} ${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET}_31) From 96a1334072c7b2e4097f46a19bae91441e08acfa Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Sat, 23 Jan 2021 17:07:09 +0100 Subject: [PATCH 031/113] :rotating_light: fix includes --- include/nlohmann/detail/hash.hpp | 3 ++- single_include/nlohmann/json.hpp | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/include/nlohmann/detail/hash.hpp b/include/nlohmann/detail/hash.hpp index c32d5535c5..12706a7fdb 100644 --- a/include/nlohmann/detail/hash.hpp +++ b/include/nlohmann/detail/hash.hpp @@ -1,6 +1,7 @@ #pragma once -#include // size_t, uint8_t +#include // uint8_t +#include // size_t #include // hash #include diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index df41129278..a02c29d196 100644 --- a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -4663,7 +4663,8 @@ class byte_container_with_subtype : public BinaryType // #include -#include // size_t, uint8_t +#include // uint8_t +#include // size_t #include // hash // #include From 9dd1d50bf3bd5acfe80eb5015aed1b2de7ce69e5 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Sat, 23 Jan 2021 20:38:08 +0100 Subject: [PATCH 032/113] :rotating_light: fix comment --- include/nlohmann/detail/input/input_adapters.hpp | 2 +- single_include/nlohmann/json.hpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/nlohmann/detail/input/input_adapters.hpp b/include/nlohmann/detail/input/input_adapters.hpp index a78a6ec96c..8595448110 100644 --- a/include/nlohmann/detail/input/input_adapters.hpp +++ b/include/nlohmann/detail/input/input_adapters.hpp @@ -395,7 +395,7 @@ struct container_input_adapter_factory< ContainerType, } }; -} +} // namespace container_input_adapter_factory_impl template typename container_input_adapter_factory_impl::container_input_adapter_factory::adapter_type input_adapter(const ContainerType& container) diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index a02c29d196..a2507ff630 100644 --- a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -5202,7 +5202,7 @@ struct container_input_adapter_factory< ContainerType, } }; -} +} // namespace 
container_input_adapter_factory_impl template typename container_input_adapter_factory_impl::container_input_adapter_factory::adapter_type input_adapter(const ContainerType& container) From 2efc29540ee6ca4c83b2edf6700af03b71e0ad89 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Sat, 23 Jan 2021 21:25:41 +0100 Subject: [PATCH 033/113] :wrench: adjust flags for GCC 11.0.0 20210110 (experimental) --- cmake/ci.cmake | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 36aaa99525..39c29f2a8a 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -71,6 +71,7 @@ set(GCC_CXXFLAGS "-std=c++11 \ --all-warnings \ --extra-warnings \ -W \ + -WNSObject-attribute \ -Wno-abi-tag \ -Waddress \ -Waddress-of-packed-member \ @@ -86,19 +87,25 @@ set(GCC_CXXFLAGS "-std=c++11 \ -Wanalyzer-file-leak \ -Wanalyzer-free-of-non-heap \ -Wanalyzer-malloc-leak \ + -Wanalyzer-mismatching-deallocation \ -Wanalyzer-null-argument \ -Wanalyzer-null-dereference \ -Wanalyzer-possible-null-argument \ -Wanalyzer-possible-null-dereference \ + -Wanalyzer-shift-count-negative \ + -Wanalyzer-shift-count-overflow \ -Wanalyzer-stale-setjmp-buffer \ -Wanalyzer-tainted-array-index \ -Wanalyzer-too-complex \ -Wanalyzer-unsafe-call-within-signal-handler \ -Wanalyzer-use-after-free \ -Wanalyzer-use-of-pointer-in-stale-stack-frame \ + -Wanalyzer-write-to-const \ + -Wanalyzer-write-to-string-literal \ -Warith-conversion \ -Warray-bounds \ -Warray-bounds=2 \ + -Warray-parameter=2 \ -Wattribute-alias=2 \ -Wattribute-warning \ -Wattributes \ @@ -131,6 +138,7 @@ set(GCC_CXXFLAGS "-std=c++11 \ -Wconversion-null \ -Wcoverage-mismatch \ -Wcpp \ + -Wctad-maybe-unsupported \ -Wctor-dtor-privacy \ -Wdangling-else \ -Wdate-time \ @@ -140,6 +148,8 @@ set(GCC_CXXFLAGS "-std=c++11 \ -Wdeprecated-copy \ -Wdeprecated-copy-dtor \ -Wdeprecated-declarations \ + -Wdeprecated-enum-enum-conversion \ + -Wdeprecated-enum-float-conversion \ -Wdisabled-optimization \ -Wdiv-by-zero \ -Wdouble-promotion \ @@ -149,17 +159,12 @@ set(GCC_CXXFLAGS "-std=c++11 \ -Wempty-body \ -Wendif-labels \ -Wenum-compare \ + -Wenum-conversion \ -Wexpansion-to-defined \ -Wextra \ -Wextra-semi \ -Wfloat-conversion \ -Wfloat-equal \ - -Wformat -Wformat-contains-nul \ - -Wformat -Wformat-extra-args \ - -Wformat -Wformat-nonliteral \ - -Wformat -Wformat-security \ - -Wformat -Wformat-y2k \ - -Wformat -Wformat-zero-length \ -Wformat-diag \ -Wformat-overflow=2 \ -Wformat-signedness \ @@ -192,6 +197,8 @@ set(GCC_CXXFLAGS "-std=c++11 \ -Wmemset-elt-size \ -Wmemset-transposed-args \ -Wmisleading-indentation \ + -Wmismatched-dealloc \ + -Wmismatched-new-delete \ -Wmismatched-tags \ -Wmissing-attributes \ -Wmissing-braces \ @@ -210,7 +217,6 @@ set(GCC_CXXFLAGS "-std=c++11 \ -Wnon-virtual-dtor \ -Wnonnull \ -Wnonnull-compare \ - -Wnonportable-cfstrings \ -Wnormalized=nfkc \ -Wnull-dereference \ -Wodr \ @@ -233,6 +239,7 @@ set(GCC_CXXFLAGS "-std=c++11 \ -Wpragmas \ -Wprio-ctor-dtor \ -Wpsabi \ + -Wno-range-loop-construct \ -Wredundant-decls \ -Wredundant-move \ -Wredundant-tags \ @@ -255,6 +262,7 @@ set(GCC_CXXFLAGS "-std=c++11 \ -Wsign-promo \ -Wsized-deallocation \ -Wsizeof-array-argument \ + -Wsizeof-array-div \ -Wsizeof-pointer-div \ -Wsizeof-pointer-memaccess \ -Wstack-protector \ @@ -264,8 +272,8 @@ set(GCC_CXXFLAGS "-std=c++11 \ -Wstrict-overflow \ -Wstrict-overflow=5 \ -Wstring-compare \ - -Wstringop-overflow \ -Wstringop-overflow=4 \ + -Wstringop-overread \ -Wstringop-truncation \ -Wsubobject-linkage \ 
-Wsuggest-attribute=cold \ @@ -291,6 +299,7 @@ set(GCC_CXXFLAGS "-std=c++11 \ -Wterminate \ -Wtrampolines \ -Wtrigraphs \ + -Wtsan \ -Wtype-limits \ -Wundef \ -Wuninitialized \ @@ -309,13 +318,15 @@ set(GCC_CXXFLAGS "-std=c++11 \ -Wunused-result \ -Wunused-value \ -Wunused-variable \ - -Wuseless-cast \ + -Wno-useless-cast \ -Wvarargs \ -Wvariadic-macros \ -Wvector-operation-performance \ + -Wvexing-parse \ -Wvirtual-inheritance \ -Wvirtual-move-assign \ -Wvla \ + -Wvla-parameter \ -Wvolatile \ -Wvolatile-register-var \ -Wwrite-strings \ From 579190152ad76ea473abbbf8bed2b76a34271411 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Sun, 24 Jan 2021 13:57:29 +0100 Subject: [PATCH 034/113] :whale: use Docker image to run CI --- .github/workflows/ubuntu.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index b12f5cb259..cfb9f3cc36 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -105,3 +105,14 @@ jobs: run: cmake -S . -B build -DJSON_CI=On - name: build run: cmake --build build --target ci_clang_analyze + + ci_build_gcc: + runs-on: ubuntu-latest + container: + image: nlohmann/json-ci:latest + steps: + - uses: actions/checkout@v2 + - name: cmake + run: cmake -S . -B build -DJSON_CI=On + - name: build + run: cmake --build build --target ci_test_gcc From 40a6532da24de5f07bd4b367f0581cc9e03d02bb Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Mon, 25 Jan 2021 22:17:11 +0100 Subject: [PATCH 035/113] :wrench: add target for Valgrind --- cmake/ci.cmake | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 39c29f2a8a..ea9cec51c4 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -12,6 +12,8 @@ include(FindPython3) find_package(Python3 COMPONENTS Interpreter) +message(STATUS "πŸ”– CMake ${CMAKE_VERSION} (${CMAKE_COMMAND})") + find_program(CLANG_TIDY_TOOL NAMES clang-tidy-11 clang-tidy) execute_process(COMMAND ${CLANG_TIDY_TOOL} --version OUTPUT_VARIABLE CLANG_TIDY_TOOL_VERSION ERROR_VARIABLE CLANG_TIDY_TOOL_VERSION) string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" CLANG_TIDY_TOOL_VERSION "${CLANG_TIDY_TOOL_VERSION}") @@ -42,6 +44,11 @@ execute_process(COMMAND ${IWYU_TOOL} --version OUTPUT_VARIABLE IWYU_TOOL_VERSION string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" IWYU_TOOL_VERSION "${IWYU_TOOL_VERSION}") message(STATUS "πŸ”– include-what-you-use ${IWYU_TOOL_VERSION} (${IWYU_TOOL})") +find_program(VALGRIND_TOOL NAMES valgrind) +execute_process(COMMAND ${VALGRIND_TOOL} --version OUTPUT_VARIABLE VALGRIND_TOOL_VERSION ERROR_VARIABLE VALGRIND_TOOL_VERSION) +string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" VALGRIND_TOOL_VERSION "${VALGRIND_TOOL_VERSION}") +message(STATUS "πŸ”– Valgrind ${VALGRIND_TOOL_VERSION} (${VALGRIND_TOOL})") + find_program(OCLINT_TOOL NAMES oclint-json-compilation-database) find_program(PLOG_CONVERTER_TOOL NAMES plog-converter) find_program(PVS_STUDIO_ANALYZER_TOOL NAMES pvs-studio-analyzer) @@ -361,6 +368,17 @@ add_custom_target(ci_test_clang_sanitizer COMMENT "Compile and test with sanitizers" ) +############################################################################### +# Valgrind.
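+# A sketch of how this target is meant to work (see the commands below):
+# configure a Debug build with GCC, hand CTest the Valgrind binary via
+# MEMORYCHECK_COMMAND and "--error-exitcode=1 --leak-check=full" via
+# MEMORYCHECK_COMMAND_OPTIONS, build with Ninja, and run "ctest -T memcheck"
+# so every unit test executes under Valgrind and any finding fails the job.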
+############################################################################### + +add_custom_target(ci_test_valgrind + COMMAND CXX=${GCC_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_valgrind -DJSON_BuildTests=ON -GNinja -DMEMORYCHECK_COMMAND=${VALGRIND_TOOL} -DMEMORYCHECK_COMMAND_OPTIONS="--error-exitcode=1 --leak-check=full" + COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_valgrind + COMMAND cd ${PROJECT_BINARY_DIR}/build_valgrind/test && ${CMAKE_CTEST_COMMAND} -T memcheck -j10 + COMMENT "Compile and test with Valgrind" +) + ############################################################################### # Check code with Clang Static Analyzer. ############################################################################### @@ -534,6 +552,6 @@ add_custom_target(ci_cmake_flags ############################################################################### add_custom_target(ci_clean - COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_gcc ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${PROJECT_BINARY_DIR}/build_infer ${PROJECT_BINARY_DIR}/build_oclint ${PROJECT_BINARY_DIR}/build_benchmarks cmake-3.1.0-Darwin64 ${JSON_CMAKE_FLAG_BUILD_DIRS} ${single_binaries} + COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_gcc ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${PROJECT_BINARY_DIR}/build_valgrind ${PROJECT_BINARY_DIR}/build_infer ${PROJECT_BINARY_DIR}/build_oclint ${PROJECT_BINARY_DIR}/build_benchmarks cmake-3.1.0-Darwin64 ${JSON_CMAKE_FLAG_BUILD_DIRS} ${single_binaries} COMMENT "Clean generated directories" ) From f3a68aeb05e0dc3d9cea4d7a4570f72e73da0de3 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Mon, 25 Jan 2021 22:33:27 +0100 Subject: [PATCH 036/113] :construction_worker: add target for Valgrind tests --- .github/workflows/ubuntu.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index cfb9f3cc36..4e436689dd 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -116,3 +116,14 @@ jobs: run: cmake -S . -B build -DJSON_CI=On - name: build run: cmake --build build --target ci_test_gcc + + ci_test_valgrind: + runs-on: ubuntu-latest + container: + image: nlohmann/json-ci:latest + steps: + - uses: actions/checkout@v2 + - name: cmake + run: cmake -S . 
-B build -DJSON_CI=On + - name: build + run: cmake --build build --target ci_test_valgrind From f4ea5bce25103216b10c1fe629ce48a57b0ec604 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Mon, 25 Jan 2021 22:47:39 +0100 Subject: [PATCH 037/113] :alembic: add Dart --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 58573b0af7..d67048aa17 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -116,6 +116,7 @@ CONFIGURE_FILE( ## create and configure the unit test target ## if (JSON_BuildTests) + include(Dart) include(CTest) enable_testing() add_subdirectory(test) From 46a243bbd73fe5efd16267ba1effbd20fc7ebcea Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Tue, 26 Jan 2021 12:59:09 +0100 Subject: [PATCH 038/113] :rewind: remove Dart --- CMakeLists.txt | 1 - cmake/ci.cmake | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d67048aa17..58573b0af7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -116,7 +116,6 @@ CONFIGURE_FILE( ## create and configure the unit test target ## if (JSON_BuildTests) - include(Dart) include(CTest) enable_testing() add_subdirectory(test) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index ea9cec51c4..65eb7fb950 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -373,9 +373,9 @@ add_custom_target(ci_test_clang_sanitizer ############################################################################### add_custom_target(ci_test_valgrind - COMMAND CXX=${GCC_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_valgrind -DJSON_BuildTests=ON -GNinja -DMEMORYCHECK_COMMAND=${VALGRIND_TOOL} -DMEMORYCHECK_COMMAND_OPTIONS="--error-exitcode=1 --leak-check=full" + COMMAND CXX=${GCC_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_valgrind -DJSON_BuildTests=ON -DJSON_Valgrind=ON -GNinja COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_valgrind - COMMAND cd ${PROJECT_BINARY_DIR}/build_valgrind/test && ${CMAKE_CTEST_COMMAND} -T memcheck -j10 + COMMAND cd ${PROJECT_BINARY_DIR}/build_valgrind/test && ${CMAKE_CTEST_COMMAND} -j10 COMMENT "Compile and test with Valgrind" ) From eadcce9b8fb0cdbfee89ef0adc19d4014dc231c3 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Tue, 26 Jan 2021 13:45:42 +0100 Subject: [PATCH 039/113] :alembic: do not call ctest in test subdirectory --- cmake/ci.cmake | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 65eb7fb950..0f0de1a42f 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -344,14 +344,14 @@ set(GCC_CXXFLAGS "-std=c++11 \ add_custom_target(ci_test_gcc COMMAND CXX=${GCC_TOOL} CXXFLAGS=${GCC_CXXFLAGS} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_gcc -DJSON_BuildTests=ON -GNinja COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_gcc - COMMAND cd ${PROJECT_BINARY_DIR}/build_gcc/test && ${CMAKE_CTEST_COMMAND} -j10 + COMMAND cd ${PROJECT_BINARY_DIR}/build_gcc && ${CMAKE_CTEST_COMMAND} -j10 COMMENT "Compile and test with GCC" ) add_custom_target(ci_test_clang COMMAND CXX=${CLANG_TOOL} CXXFLAGS=${CLANG_CXXFLAGS} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON -GNinja COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang - COMMAND cd ${PROJECT_BINARY_DIR}/build_clang/test && ${CMAKE_CTEST_COMMAND} 
-j10 + COMMAND cd ${PROJECT_BINARY_DIR}/build_clang && ${CMAKE_CTEST_COMMAND} -j10 COMMENT "Compile and test with Clang" ) @@ -364,7 +364,7 @@ set(CLANG_CXX_FLAGS_SANITIZER "-g -O0 -fsanitize=address -fsanitize=undefined -f add_custom_target(ci_test_clang_sanitizer COMMAND CXX=${CLANG_TOOL} CXXFLAGS=${CLANG_CXX_FLAGS_SANITIZER} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_sanitizer -DJSON_BuildTests=ON -GNinja COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang_sanitizer - COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_sanitizer/test && ${CMAKE_CTEST_COMMAND} -j10 + COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_sanitizer && ${CMAKE_CTEST_COMMAND} -j10 COMMENT "Compile and test with sanitizers" ) @@ -373,9 +373,9 @@ add_custom_target(ci_test_clang_sanitizer ############################################################################### add_custom_target(ci_test_valgrind - COMMAND CXX=${GCC_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_valgrind -DJSON_BuildTests=ON -DJSON_Valgrind=ON -GNinja + COMMAND CXX=${GCC_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_valgrind -DJSON_BuildTests=ON -GNinja -DMEMORYCHECK_COMMAND=${VALGRIND_TOOL} -DMEMORYCHECK_COMMAND_OPTIONS="--error-exitcode=1 --leak-check=full" COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_valgrind - COMMAND cd ${PROJECT_BINARY_DIR}/build_valgrind/test && ${CMAKE_CTEST_COMMAND} -j10 + COMMAND cd ${PROJECT_BINARY_DIR}/build_valgrind && ${CMAKE_CTEST_COMMAND} -T memcheck -j10 COMMENT "Compile and test with Valgrind" ) From 8651ad47b4046bba4b6265c5c4d4231fa67fddc4 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Tue, 26 Jan 2021 13:58:39 +0100 Subject: [PATCH 040/113] :alembic: download test data explicitly --- cmake/ci.cmake | 1 + 1 file changed, 1 insertion(+) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 0f0de1a42f..84735eeb6f 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -375,6 +375,7 @@ add_custom_target(ci_test_clang_sanitizer add_custom_target(ci_test_valgrind COMMAND CXX=${GCC_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_valgrind -DJSON_BuildTests=ON -GNinja -DMEMORYCHECK_COMMAND=${VALGRIND_TOOL} -DMEMORYCHECK_COMMAND_OPTIONS="--error-exitcode=1 --leak-check=full" COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_valgrind + COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_valgrind --target download_test_data COMMAND cd ${PROJECT_BINARY_DIR}/build_valgrind && ${CMAKE_CTEST_COMMAND} -T memcheck -j10 COMMENT "Compile and test with Valgrind" ) From 468dc5819967c76a89f3e45fa39c92f5836a29d4 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Tue, 26 Jan 2021 14:09:35 +0100 Subject: [PATCH 041/113] :alembic: only execute Valgrind tests --- cmake/ci.cmake | 5 ++--- test/CMakeLists.txt | 1 + 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 84735eeb6f..d1b7a68523 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -373,10 +373,9 @@ add_custom_target(ci_test_clang_sanitizer ############################################################################### add_custom_target(ci_test_valgrind - COMMAND CXX=${GCC_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_valgrind -DJSON_BuildTests=ON -GNinja -DMEMORYCHECK_COMMAND=${VALGRIND_TOOL} 
-DMEMORYCHECK_COMMAND_OPTIONS="--error-exitcode=1 --leak-check=full" + COMMAND CXX=${GCC_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_valgrind -DJSON_BuildTests=ON -DJSON_Valgrind=ON -GNinja COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_valgrind - COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_valgrind --target download_test_data - COMMAND cd ${PROJECT_BINARY_DIR}/build_valgrind && ${CMAKE_CTEST_COMMAND} -T memcheck -j10 + COMMAND cd ${PROJECT_BINARY_DIR}/build_valgrind && ${CMAKE_CTEST_COMMAND} -L valgrind -j10 COMMENT "Compile and test with Valgrind" ) diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 3d8bceb70c..dde9f96bab 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -173,6 +173,7 @@ foreach(file ${files}) WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} ) set_tests_properties("${testcase}_valgrind" PROPERTIES LABELS "valgrind") + set_tests_properties("${testcase}_valgrind" PROPERTIES LABELS "all" FIXTURES_REQUIRED TEST_DATA) endif() endforeach() From b2bc2834336816201ca6c350ccc835e533947de9 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Tue, 26 Jan 2021 14:22:38 +0100 Subject: [PATCH 042/113] :alembic: fix labels --- test/CMakeLists.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index dde9f96bab..6336c3fc88 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -172,8 +172,7 @@ foreach(file ${files}) COMMAND ${memcheck_command} ${CMAKE_CURRENT_BINARY_DIR}/${testcase} ${DOCTEST_TEST_FILTER} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} ) - set_tests_properties("${testcase}_valgrind" PROPERTIES LABELS "valgrind") - set_tests_properties("${testcase}_valgrind" PROPERTIES LABELS "all" FIXTURES_REQUIRED TEST_DATA) + set_tests_properties("${testcase}_valgrind" PROPERTIES LABELS "valgrind" FIXTURES_REQUIRED TEST_DATA) endif() endforeach() From 53976034d505c43cc293c55a3ca1a32385c29ab3 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Tue, 26 Jan 2021 14:23:01 +0100 Subject: [PATCH 043/113] :fire: remove unneeded jobs --- .github/workflows/ubuntu.yml | 63 +----------------------------------- 1 file changed, 1 insertion(+), 62 deletions(-) diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 4e436689dd..1a00103659 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -3,46 +3,6 @@ name: Ubuntu on: [push, pull_request] jobs: - gcc_build: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v1 - - name: install_gcc - run: | - sudo apt update - sudo apt install gcc-10 g++-10 - shell: bash - - name: cmake - run: cmake -S . -B build -D CMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On - env: - CC: gcc-10 - CXX: g++-10 - - name: build - run: cmake --build build --parallel 10 - - name: test - run: cd build ; ctest -j 10 --output-on-failure - - clang_build: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v1 - - name: install_clang - run: | - sudo apt update - sudo apt install clang-10 - shell: bash - - name: cmake - run: cmake -S . 
-B build -D CMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On - env: - CC: clang-10 - CXX: clang++-10 - - name: build - run: cmake --build build --parallel 10 - - name: test - run: cd build ; ctest -j 10 --output-on-failure - clang_build_cxx20: runs-on: ubuntu-latest @@ -63,27 +23,6 @@ jobs: - name: test run: cd build ; ctest -j 10 --output-on-failure - ci_test_clang: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v1 - - name: install_ninja - run: | - sudo apt update - sudo apt install ninja-build - shell: bash - - name: install_clang - run: | - wget https://apt.llvm.org/llvm.sh - chmod +x llvm.sh - sudo ./llvm.sh 11 - shell: bash - - name: cmake - run: cmake -S . -B build -DJSON_CI=On - - name: build - run: cmake --build build --target ci_test_clang - ci_clang_analyze: runs-on: ubuntu-latest @@ -106,7 +45,7 @@ jobs: - name: build run: cmake --build build --target ci_clang_analyze - ci_build_gcc: + ci_test_gcc: runs-on: ubuntu-latest container: image: nlohmann/json-ci:latest From d9bde8ef46f67dd59f56acd7410c68bc8549dbea Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Tue, 26 Jan 2021 15:36:59 +0100 Subject: [PATCH 044/113] :hammer: cleanup --- .github/workflows/ubuntu.yml | 22 ++++++++++++++++ cmake/ci.cmake | 8 +++--- test/CMakeLists.txt | 51 +----------------------------------- 3 files changed, 27 insertions(+), 54 deletions(-) diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 1a00103659..dac7842e7e 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -45,6 +45,28 @@ jobs: - name: build run: cmake --build build --target ci_clang_analyze + ci_test_clang: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v1 + - name: install_ninja + run: | + sudo apt update + sudo apt install ninja-build + shell: bash + - name: install_clang + run: | + wget https://apt.llvm.org/llvm.sh + chmod +x llvm.sh + sudo ./llvm.sh 11 + sudo apt-get install clang-tools-11 + shell: bash + - name: cmake + run: cmake -S . 
-B build -DJSON_CI=On + - name: build + run: cmake --build build --target ci_test_clang + ci_test_gcc: runs-on: ubuntu-latest container: diff --git a/cmake/ci.cmake b/cmake/ci.cmake index d1b7a68523..86876439d4 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -344,14 +344,14 @@ set(GCC_CXXFLAGS "-std=c++11 \ add_custom_target(ci_test_gcc COMMAND CXX=${GCC_TOOL} CXXFLAGS=${GCC_CXXFLAGS} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_gcc -DJSON_BuildTests=ON -GNinja COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_gcc - COMMAND cd ${PROJECT_BINARY_DIR}/build_gcc && ${CMAKE_CTEST_COMMAND} -j10 + COMMAND cd ${PROJECT_BINARY_DIR}/build_gcc && ${CMAKE_CTEST_COMMAND} -j10 --output-on-failure COMMENT "Compile and test with GCC" ) add_custom_target(ci_test_clang COMMAND CXX=${CLANG_TOOL} CXXFLAGS=${CLANG_CXXFLAGS} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON -GNinja COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang - COMMAND cd ${PROJECT_BINARY_DIR}/build_clang && ${CMAKE_CTEST_COMMAND} -j10 + COMMAND cd ${PROJECT_BINARY_DIR}/build_clang && ${CMAKE_CTEST_COMMAND} -j10 --output-on-failure COMMENT "Compile and test with Clang" ) @@ -364,7 +364,7 @@ set(CLANG_CXX_FLAGS_SANITIZER "-g -O0 -fsanitize=address -fsanitize=undefined -f add_custom_target(ci_test_clang_sanitizer COMMAND CXX=${CLANG_TOOL} CXXFLAGS=${CLANG_CXX_FLAGS_SANITIZER} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_sanitizer -DJSON_BuildTests=ON -GNinja COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang_sanitizer - COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_sanitizer && ${CMAKE_CTEST_COMMAND} -j10 + COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_sanitizer && ${CMAKE_CTEST_COMMAND} -j10 --output-on-failure COMMENT "Compile and test with sanitizers" ) @@ -375,7 +375,7 @@ add_custom_target(ci_test_clang_sanitizer add_custom_target(ci_test_valgrind COMMAND CXX=${GCC_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_valgrind -DJSON_BuildTests=ON -DJSON_Valgrind=ON -GNinja COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_valgrind - COMMAND cd ${PROJECT_BINARY_DIR}/build_valgrind && ${CMAKE_CTEST_COMMAND} -L valgrind -j10 + COMMAND cd ${PROJECT_BINARY_DIR}/build_valgrind && ${CMAKE_CTEST_COMMAND} -L valgrind -j10 --output-on-failure COMMENT "Compile and test with Valgrind" ) diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 6336c3fc88..15f55dfa5d 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -91,56 +91,7 @@ endif() # one executable for each unit test file ############################################################################# -set(files - src/unit-algorithms.cpp - src/unit-allocator.cpp - src/unit-alt-string.cpp - src/unit-assert_macro.cpp - src/unit-bson.cpp - src/unit-capacity.cpp - src/unit-cbor.cpp - src/unit-class_const_iterator.cpp - src/unit-class_iterator.cpp - src/unit-class_lexer.cpp - src/unit-class_parser.cpp - src/unit-comparison.cpp - src/unit-concepts.cpp - src/unit-constructor1.cpp - src/unit-constructor2.cpp - src/unit-convenience.cpp - src/unit-conversions.cpp - src/unit-deserialization.cpp - src/unit-element_access1.cpp - src/unit-element_access2.cpp - src/unit-hash.cpp - src/unit-inspection.cpp - src/unit-items.cpp - src/unit-iterators1.cpp - src/unit-iterators2.cpp - 
src/unit-json_patch.cpp - src/unit-json_pointer.cpp - src/unit-large_json.cpp - src/unit-merge_patch.cpp - src/unit-meta.cpp - src/unit-modifiers.cpp - src/unit-msgpack.cpp - src/unit-noexcept.cpp - src/unit-ordered_json.cpp - src/unit-ordered_map.cpp - src/unit-pointer_access.cpp - src/unit-readme.cpp - src/unit-reference_access.cpp - src/unit-regression1.cpp - src/unit-regression2.cpp - src/unit-serialization.cpp - src/unit-testsuites.cpp - src/unit-to_chars.cpp - src/unit-ubjson.cpp - src/unit-udt.cpp - src/unit-udt_macro.cpp - src/unit-unicode.cpp - src/unit-user_defined_input.cpp - src/unit-wstring.cpp) +file(GLOB files src/unit-*.cpp) foreach(file ${files}) get_filename_component(file_basename ${file} NAME_WE) From 702d223899b2315d86c443999181072c0fd1c11f Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Tue, 26 Jan 2021 22:25:20 +0100 Subject: [PATCH 045/113] :bug: fix OCLint call --- cmake/ci.cmake | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 86876439d4..f0ab2da536 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -50,6 +50,11 @@ string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" VALGRIND_TOOL_VERSION "${VALGRIND_TOOL_V message(STATUS "πŸ”– Valgrind ${VALGRIND_TOOL_VERSION} (${VALGRIND_TOOL})") find_program(OCLINT_TOOL NAMES oclint-json-compilation-database) +find_program(OCLINT_VERSION_TOOL NAMES oclint) +execute_process(COMMAND ${OCLINT_VERSION_TOOL} --version OUTPUT_VARIABLE OCLINT_TOOL_VERSION ERROR_VARIABLE OCLINT_TOOL_VERSION) +string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" OCLINT_TOOL_VERSION "${OCLINT_TOOL_VERSION}") +message(STATUS "πŸ”– OCLint ${OCLINT_TOOL_VERSION} (${OCLINT_TOOL})") + find_program(PLOG_CONVERTER_TOOL NAMES plog-converter) find_program(PVS_STUDIO_ANALYZER_TOOL NAMES pvs-studio-analyzer) find_program(SCAN_BUILD_TOOL NAMES scan-build-11 scan-build) @@ -422,7 +427,7 @@ target_compile_features(single_all PRIVATE cxx_std_11) add_custom_target(ci_oclint COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_oclint -DJSON_BuildTests=OFF -DJSON_CI=ON -DCMAKE_EXPORT_COMPILE_COMMANDS=ON - COMMAND ${OCLINT_TOOL} -i ${PROJECT_BINARY_DIR}/src_single/all.cpp -p ${PROJECT_BINARY_DIR}/build_oclint -- -report-type html -enable-global-analysis -o oclint_report.html + COMMAND ${OCLINT_TOOL} -i ${PROJECT_BINARY_DIR}/build_oclint/src_single/all.cpp -p ${PROJECT_BINARY_DIR}/build_oclint -- -report-type html -enable-global-analysis -o oclint_report.html COMMENT "Check code with OCLint" ) From 309829d64ff4473b61c60f6258aa65fd25733c26 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Wed, 27 Jan 2021 14:22:03 +0100 Subject: [PATCH 046/113] :white_check_mark: add targets for offline and git-independent tests --- cmake/ci.cmake | 40 ++++++++++++++++++++++++++++++++-------- 1 file changed, 32 insertions(+), 8 deletions(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index f0ab2da536..1a963905d3 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -1,10 +1,3 @@ -# macOS - -# brew install llvm cppcheck iwyu infer oclint/formulae/oclint -# brew install viva64/pvs-studio/pvs-studio -# (you will need credentials) -# brew install gcc --HEAD - ############################################################################### # Needed tools. 
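# All tools below are located with the same four-line pattern; a minimal
# sketch, where SOME_TOOL and some-tool are placeholders rather than tools
# actually used by this file:
#   find_program(SOME_TOOL NAMES some-tool)
#   execute_process(COMMAND ${SOME_TOOL} --version OUTPUT_VARIABLE SOME_TOOL_VERSION ERROR_VARIABLE SOME_TOOL_VERSION)
#   string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" SOME_TOOL_VERSION "${SOME_TOOL_VERSION}")
#   message(STATUS "πŸ”– some-tool ${SOME_TOOL_VERSION} (${SOME_TOOL})")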
############################################################################### @@ -49,6 +42,11 @@ execute_process(COMMAND ${VALGRIND_TOOL} --version OUTPUT_VARIABLE VALGRIND_TOOL string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" VALGRIND_TOOL_VERSION "${VALGRIND_TOOL_VERSION}") message(STATUS "πŸ”– Valgrind ${VALGRIND_TOOL_VERSION} (${VALGRIND_TOOL})") +find_program(GIT_TOOL NAMES git) +execute_process(COMMAND ${GIT_TOOL} --version OUTPUT_VARIABLE GIT_TOOL_VERSION ERROR_VARIABLE GIT_TOOL_VERSION) +string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" GIT_TOOL_VERSION "${GIT_TOOL_VERSION}") +message(STATUS "πŸ”– Git ${GIT_TOOL_VERSION} (${GIT_TOOL})") + find_program(OCLINT_TOOL NAMES oclint-json-compilation-database) find_program(OCLINT_VERSION_TOOL NAMES oclint) execute_process(COMMAND ${OCLINT_VERSION_TOOL} --version OUTPUT_VARIABLE OCLINT_TOOL_VERSION ERROR_VARIABLE OCLINT_TOOL_VERSION) @@ -463,6 +461,32 @@ add_custom_target(ci_infer COMMENT "Check code with Infer" ) +############################################################################### +# Run test suite with previously downloaded test data. +############################################################################### + +add_custom_target(ci_offline_testdata + COMMAND mkdir -p ${PROJECT_BINARY_DIR}/build_offline_testdata/test_data + COMMAND cd ${PROJECT_BINARY_DIR}/build_offline_testdata/test_data && ${GIT_TOOL} clone -c advice.detachedHead=false --branch v3.0.0 https://github.com/nlohmann/json_test_data.git --quiet --depth 1 + COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_offline_testdata -DJSON_BuildTests=ON -DJSON_TestDataDirectory=${PROJECT_BINARY_DIR}/build_offline_testdata/test_data/json_test_data -GNinja + COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_offline_testdata + COMMAND cd ${PROJECT_BINARY_DIR}/build_offline_testdata && ${CMAKE_CTEST_COMMAND} -j10 --output-on-failure + COMMENT "Check code with previously downloaded test data" +) + +############################################################################### +# Run test suite when project was not checked out from Git +############################################################################### + +add_custom_target(ci_non_git_tests + COMMAND mkdir -p ${PROJECT_BINARY_DIR}/build_ci_non_git_tests/sources + COMMAND cd ${PROJECT_SOURCE_DIR} && for FILE in `${GIT_TOOL} ls-tree --name-only HEAD`\; do cp -r $$FILE ${PROJECT_BINARY_DIR}/build_ci_non_git_tests/sources \; done + COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_BINARY_DIR}/build_ci_non_git_tests/sources -B${PROJECT_BINARY_DIR}/build_ci_non_git_tests -DJSON_BuildTests=ON -GNinja + COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_ci_non_git_tests + COMMAND cd ${PROJECT_BINARY_DIR}/build_ci_non_git_tests && ${CMAKE_CTEST_COMMAND} -j10 -LE git_required --output-on-failure + COMMENT "Check code when project was not checked out from Git" +) + ############################################################################### # Check if every header in the include folder includes sufficient headers to # be compiled individually. 
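# (Each header from include/nlohmann is compiled as its own translation unit,
# so a header that fails to include one of its dependencies breaks the build.)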
@@ -557,6 +581,6 @@ add_custom_target(ci_cmake_flags ############################################################################### add_custom_target(ci_clean - COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_gcc ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${PROJECT_BINARY_DIR}/build_valgrind ${PROJECT_BINARY_DIR}/build_infer ${PROJECT_BINARY_DIR}/build_oclint ${PROJECT_BINARY_DIR}/build_benchmarks cmake-3.1.0-Darwin64 ${JSON_CMAKE_FLAG_BUILD_DIRS} ${single_binaries} + COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_gcc ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${PROJECT_BINARY_DIR}/build_valgrind ${PROJECT_BINARY_DIR}/build_infer ${PROJECT_BINARY_DIR}/build_oclint ${PROJECT_BINARY_DIR}/build_benchmarks ${PROJECT_BINARY_DIR}/build_ci_non_git_tests ${PROJECT_BINARY_DIR}/build_offline_testdata cmake-3.1.0-Darwin64 ${JSON_CMAKE_FLAG_BUILD_DIRS} ${single_binaries} COMMENT "Clean generated directories" ) From 24cab965d1a5a3b6a2b98b915127d300db475ef5 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Wed, 27 Jan 2021 15:05:31 +0100 Subject: [PATCH 047/113] :white_check_mark: add targets for C++ language versions and reproducible tests --- cmake/ci.cmake | 82 ++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 59 insertions(+), 23 deletions(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 1a963905d3..273d9b575e 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -5,17 +5,17 @@ include(FindPython3) find_package(Python3 COMPONENTS Interpreter) -message(STATUS "πŸ”– CMake ${CMAKE_VERSION} (${CMAKE_COMMAND})") +find_program(CLANG_TOOL NAMES clang++-HEAD clang++-11 clang++) +execute_process(COMMAND ${CLANG_TOOL} --version OUTPUT_VARIABLE CLANG_TOOL_VERSION ERROR_VARIABLE CLANG_TOOL_VERSION) +string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" CLANG_TOOL_VERSION "${CLANG_TOOL_VERSION}") +message(STATUS "πŸ”– Clang ${CLANG_TOOL_VERSION} (${CLANG_TOOL})") find_program(CLANG_TIDY_TOOL NAMES clang-tidy-11 clang-tidy) execute_process(COMMAND ${CLANG_TIDY_TOOL} --version OUTPUT_VARIABLE CLANG_TIDY_TOOL_VERSION ERROR_VARIABLE CLANG_TIDY_TOOL_VERSION) string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" CLANG_TIDY_TOOL_VERSION "${CLANG_TIDY_TOOL_VERSION}") message(STATUS "πŸ”– Clang-Tidy ${CLANG_TIDY_TOOL_VERSION} (${CLANG_TIDY_TOOL})") -find_program(CLANG_TOOL NAMES clang++-HEAD clang++-11 clang++) -execute_process(COMMAND ${CLANG_TOOL} --version OUTPUT_VARIABLE CLANG_TOOL_VERSION ERROR_VARIABLE CLANG_TOOL_VERSION) -string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" CLANG_TOOL_VERSION "${CLANG_TOOL_VERSION}") -message(STATUS "πŸ”– Clang ${CLANG_TOOL_VERSION} (${CLANG_TOOL})") +message(STATUS "πŸ”– CMake ${CMAKE_VERSION} (${CMAKE_COMMAND})") find_program(CPPCHECK_TOOL NAMES cppcheck) execute_process(COMMAND ${CPPCHECK_TOOL} --version OUTPUT_VARIABLE CPPCHECK_TOOL_VERSION ERROR_VARIABLE CPPCHECK_TOOL_VERSION) @@ -27,25 +27,25 @@ execute_process(COMMAND ${GCC_TOOL} --version OUTPUT_VARIABLE GCC_TOOL_VERSION E string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" GCC_TOOL_VERSION "${GCC_TOOL_VERSION}") message(STATUS "πŸ”– GCC ${GCC_TOOL_VERSION} (${GCC_TOOL})") -find_program(INFER_TOOL NAMES infer) -execute_process(COMMAND ${INFER_TOOL} --version OUTPUT_VARIABLE INFER_TOOL_VERSION ERROR_VARIABLE INFER_TOOL_VERSION) -string(REGEX MATCH 
"[0-9]+(\\.[0-9]+)+" INFER_TOOL_VERSION "${INFER_TOOL_VERSION}") -message(STATUS "πŸ”– Infer ${INFER_TOOL_VERSION} (${INFER_TOOL})") +find_program(GIT_TOOL NAMES git) +execute_process(COMMAND ${GIT_TOOL} --version OUTPUT_VARIABLE GIT_TOOL_VERSION ERROR_VARIABLE GIT_TOOL_VERSION) +string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" GIT_TOOL_VERSION "${GIT_TOOL_VERSION}") +message(STATUS "πŸ”– Git ${GIT_TOOL_VERSION} (${GIT_TOOL})") find_program(IWYU_TOOL NAMES include-what-you-use iwyu) execute_process(COMMAND ${IWYU_TOOL} --version OUTPUT_VARIABLE IWYU_TOOL_VERSION ERROR_VARIABLE IWYU_TOOL_VERSION) string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" IWYU_TOOL_VERSION "${IWYU_TOOL_VERSION}") message(STATUS "πŸ”– include-what-you-use ${IWYU_TOOL_VERSION} (${IWYU_TOOL})") -find_program(VALGRIND_TOOL NAMES valgrind) -execute_process(COMMAND ${VALGRIND_TOOL} --version OUTPUT_VARIABLE VALGRIND_TOOL_VERSION ERROR_VARIABLE VALGRIND_TOOL_VERSION) -string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" VALGRIND_TOOL_VERSION "${VALGRIND_TOOL_VERSION}") -message(STATUS "πŸ”– Valgrind ${VALGRIND_TOOL_VERSION} (${VALGRIND_TOOL})") +find_program(INFER_TOOL NAMES infer) +execute_process(COMMAND ${INFER_TOOL} --version OUTPUT_VARIABLE INFER_TOOL_VERSION ERROR_VARIABLE INFER_TOOL_VERSION) +string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" INFER_TOOL_VERSION "${INFER_TOOL_VERSION}") +message(STATUS "πŸ”– Infer ${INFER_TOOL_VERSION} (${INFER_TOOL})") -find_program(GIT_TOOL NAMES git) -execute_process(COMMAND ${GIT_TOOL} --version OUTPUT_VARIABLE GIT_TOOL_VERSION ERROR_VARIABLE GIT_TOOL_VERSION) -string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" GIT_TOOL_VERSION "${GIT_TOOL_VERSION}") -message(STATUS "πŸ”– Git ${GIT_TOOL_VERSION} (${GIT_TOOL})") +find_program(NINJA_TOOL NAMES ninja) +execute_process(COMMAND ${NINJA_TOOL} --version OUTPUT_VARIABLE NINJA_TOOL_VERSION ERROR_VARIABLE NINJA_TOOL_VERSION) +string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" NINJA_TOOL_VERSION "${NINJA_TOOL_VERSION}") +message(STATUS "πŸ”– Ninja ${NINJA_TOOL_VERSION} (${NINJA_TOOL})") find_program(OCLINT_TOOL NAMES oclint-json-compilation-database) find_program(OCLINT_VERSION_TOOL NAMES oclint) @@ -53,6 +53,11 @@ execute_process(COMMAND ${OCLINT_VERSION_TOOL} --version OUTPUT_VARIABLE OCLINT_ string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" OCLINT_TOOL_VERSION "${OCLINT_TOOL_VERSION}") message(STATUS "πŸ”– OCLint ${OCLINT_TOOL_VERSION} (${OCLINT_TOOL})") +find_program(VALGRIND_TOOL NAMES valgrind) +execute_process(COMMAND ${VALGRIND_TOOL} --version OUTPUT_VARIABLE VALGRIND_TOOL_VERSION ERROR_VARIABLE VALGRIND_TOOL_VERSION) +string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" VALGRIND_TOOL_VERSION "${VALGRIND_TOOL_VERSION}") +message(STATUS "πŸ”– Valgrind ${VALGRIND_TOOL_VERSION} (${VALGRIND_TOOL})") + find_program(PLOG_CONVERTER_TOOL NAMES plog-converter) find_program(PVS_STUDIO_ANALYZER_TOOL NAMES pvs-studio-analyzer) find_program(SCAN_BUILD_TOOL NAMES scan-build-11 scan-build) @@ -358,6 +363,26 @@ add_custom_target(ci_test_clang COMMENT "Compile and test with Clang" ) +############################################################################### +# Different C++ Standards. 
+############################################################################### + +foreach(CXX_STANDARD 11 14 17 20) + add_custom_target(ci_test_gcc_${CXX_STANDARD} + COMMAND CXX=${GCC_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_gcc_${CXX_STANDARD} -DJSON_BuildTests=ON -DCMAKE_CXX_STANDARD=${CXX_STANDARD} -DCMAKE_CXX_STANDARD_REQUIRED=ON -GNinja + COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_gcc_${CXX_STANDARD} + COMMAND cd ${PROJECT_BINARY_DIR}/build_gcc_${CXX_STANDARD} && ${CMAKE_CTEST_COMMAND} -j10 --output-on-failure + COMMENT "Compile and test with GCC for C++${CXX_STANDARD}" + ) + + add_custom_target(ci_test_clang_${CXX_STANDARD} + COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_${CXX_STANDARD} -DJSON_BuildTests=ON -DCMAKE_CXX_STANDARD=${CXX_STANDARD} -DCMAKE_CXX_STANDARD_REQUIRED=ON -GNinja + COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang_${CXX_STANDARD} + COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_${CXX_STANDARD} && ${CMAKE_CTEST_COMMAND} -j10 --output-on-failure + COMMENT "Compile and test with Clang for C++${CXX_STANDARD}" + ) +endforeach() + ############################################################################### # Sanitizers. ############################################################################### @@ -479,14 +504,25 @@ add_custom_target(ci_offline_testdata ############################################################################### add_custom_target(ci_non_git_tests - COMMAND mkdir -p ${PROJECT_BINARY_DIR}/build_ci_non_git_tests/sources - COMMAND cd ${PROJECT_SOURCE_DIR} && for FILE in `${GIT_TOOL} ls-tree --name-only HEAD`\; do cp -r $$FILE ${PROJECT_BINARY_DIR}/build_ci_non_git_tests/sources \; done - COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_BINARY_DIR}/build_ci_non_git_tests/sources -B${PROJECT_BINARY_DIR}/build_ci_non_git_tests -DJSON_BuildTests=ON -GNinja - COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_ci_non_git_tests - COMMAND cd ${PROJECT_BINARY_DIR}/build_ci_non_git_tests && ${CMAKE_CTEST_COMMAND} -j10 -LE git_required --output-on-failure + COMMAND mkdir -p ${PROJECT_BINARY_DIR}/build_non_git_tests/sources + COMMAND cd ${PROJECT_SOURCE_DIR} && for FILE in `${GIT_TOOL} ls-tree --name-only HEAD`\; do cp -r $$FILE ${PROJECT_BINARY_DIR}/build_non_git_tests/sources \; done + COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_BINARY_DIR}/build_non_git_tests/sources -B${PROJECT_BINARY_DIR}/build_non_git_tests -DJSON_BuildTests=ON -GNinja + COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_non_git_tests + COMMAND cd ${PROJECT_BINARY_DIR}/build_non_git_tests && ${CMAKE_CTEST_COMMAND} -j10 -LE git_required --output-on-failure COMMENT "Check code when project was not checked out from Git" ) +############################################################################### +# Run test suite and exclude tests that change installed files +############################################################################### + +add_custom_target(ci_reproducible_tests + COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_reproducible_tests -DJSON_BuildTests=ON -GNinja + COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_reproducible_tests + COMMAND cd ${PROJECT_BINARY_DIR}/build_reproducible_tests && ${CMAKE_CTEST_COMMAND} -j10 -LE not_reproducible --output-on-failure + COMMENT "Check 
code and exclude tests that change installed files" +) + ############################################################################### # Check if every header in the include folder includes sufficient headers to # be compiled individually. @@ -581,6 +617,6 @@ add_custom_target(ci_cmake_flags ############################################################################### add_custom_target(ci_clean - COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_gcc ${PROJECT_BINARY_DIR}/build_clang ${PROJECT_BINARY_DIR}/build_clang_analyze ${PROJECT_BINARY_DIR}/build_clang_tidy ${PROJECT_BINARY_DIR}/build_pvs_studio ${PROJECT_BINARY_DIR}/build_clang_sanitizer ${PROJECT_BINARY_DIR}/build_valgrind ${PROJECT_BINARY_DIR}/build_infer ${PROJECT_BINARY_DIR}/build_oclint ${PROJECT_BINARY_DIR}/build_benchmarks ${PROJECT_BINARY_DIR}/build_ci_non_git_tests ${PROJECT_BINARY_DIR}/build_offline_testdata cmake-3.1.0-Darwin64 ${JSON_CMAKE_FLAG_BUILD_DIRS} ${single_binaries} + COMMAND rm -fr ${PROJECT_BINARY_DIR}/build_* cmake-3.1.0-Darwin64 ${JSON_CMAKE_FLAG_BUILD_DIRS} ${single_binaries} COMMENT "Clean generated directories" ) From 2e044e27a23e0919005b7889d80233ecb75496b9 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Wed, 27 Jan 2021 20:37:18 +0100 Subject: [PATCH 048/113] :hammer: clean up --- .github/workflows/ubuntu.yml | 21 ++---- cmake/ci.cmake | 123 ++++++++++++++++++++++++++--------- 2 files changed, 98 insertions(+), 46 deletions(-) diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index dac7842e7e..10239fedf0 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -3,25 +3,20 @@ name: Ubuntu on: [push, pull_request] jobs: - clang_build_cxx20: + ci_test_clang_cxx20: runs-on: ubuntu-latest steps: - uses: actions/checkout@v1 - name: install_clang run: | - sudo apt update - sudo apt install clang-10 + sudo apt update + sudo apt install clang-10 ninja-build shell: bash - name: cmake - run: cmake -S . -B build -D CMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On -DCMAKE_CXX_STANDARD=20 -DCMAKE_CXX_STANDARD_REQUIRED=ON - env: - CC: clang-10 - CXX: clang++-10 + run: cmake -S . -B build -DJSON_CI=On - name: build - run: cmake --build build --parallel 10 - - name: test - run: cd build ; ctest -j 10 --output-on-failure + run: cmake --build build --target ci_test_clang_cxx20 ci_clang_analyze: runs-on: ubuntu-latest @@ -69,8 +64,7 @@ jobs: ci_test_gcc: runs-on: ubuntu-latest - container: - image: nlohmann/json-ci:latest + container: nlohmann/json-ci:latest steps: - uses: actions/checkout@v2 - name: cmake @@ -80,8 +74,7 @@ jobs: ci_test_valgrind: runs-on: ubuntu-latest - container: - image: nlohmann/json-ci:latest + container: nlohmann/json-ci:latest steps: - uses: actions/checkout@v2 - name: cmake diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 273d9b575e..0134328a51 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -1,3 +1,6 @@ +# number of parallel jobs for CTest +set(N 10) + ############################################################################### # Needed tools. ############################################################################### @@ -65,6 +68,10 @@ find_program(SCAN_BUILD_TOOL NAMES scan-build-11 scan-build) # the individual source files file(GLOB_RECURSE SRC_FILES ${PROJECT_SOURCE_DIR}/include/nlohmann/*.hpp) +############################################################################### +# Different C++ Standards. 
+############################################################################### + set(CLANG_CXXFLAGS "-std=c++11 \ -Werror \ -Weverything \ @@ -350,17 +357,23 @@ set(GCC_CXXFLAGS "-std=c++11 \ ") add_custom_target(ci_test_gcc - COMMAND CXX=${GCC_TOOL} CXXFLAGS=${GCC_CXXFLAGS} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_gcc -DJSON_BuildTests=ON -GNinja + COMMAND CXX=${GCC_TOOL} CXXFLAGS=${GCC_CXXFLAGS} ${CMAKE_COMMAND} + -DCMAKE_BUILD_TYPE=Debug -GNinja + -DJSON_BuildTests=ON + -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_gcc COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_gcc - COMMAND cd ${PROJECT_BINARY_DIR}/build_gcc && ${CMAKE_CTEST_COMMAND} -j10 --output-on-failure - COMMENT "Compile and test with GCC" + COMMAND cd ${PROJECT_BINARY_DIR}/build_gcc && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMENT "Compile and test with GCC using maximal warning flags" ) add_custom_target(ci_test_clang - COMMAND CXX=${CLANG_TOOL} CXXFLAGS=${CLANG_CXXFLAGS} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON -GNinja + COMMAND CXX=${CLANG_TOOL} CXXFLAGS=${CLANG_CXXFLAGS} ${CMAKE_COMMAND} + -DCMAKE_BUILD_TYPE=Debug -GNinja + -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON + -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang - COMMAND cd ${PROJECT_BINARY_DIR}/build_clang && ${CMAKE_CTEST_COMMAND} -j10 --output-on-failure - COMMENT "Compile and test with Clang" + COMMAND cd ${PROJECT_BINARY_DIR}/build_clang && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMENT "Compile and test with Clang using maximal warning flags" ) ############################################################################### @@ -368,17 +381,25 @@ add_custom_target(ci_test_clang ############################################################################### foreach(CXX_STANDARD 11 14 17 20) - add_custom_target(ci_test_gcc_${CXX_STANDARD} - COMMAND CXX=${GCC_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_gcc_${CXX_STANDARD} -DJSON_BuildTests=ON -DCMAKE_CXX_STANDARD=${CXX_STANDARD} -DCMAKE_CXX_STANDARD_REQUIRED=ON -GNinja - COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_gcc_${CXX_STANDARD} - COMMAND cd ${PROJECT_BINARY_DIR}/build_gcc_${CXX_STANDARD} && ${CMAKE_CTEST_COMMAND} -j10 --output-on-failure + add_custom_target(ci_test_gcc_cxx${CXX_STANDARD} + COMMAND CXX=${GCC_TOOL} ${CMAKE_COMMAND} + -DCMAKE_BUILD_TYPE=Debug -GNinja + -DCMAKE_CXX_STANDARD=${CXX_STANDARD} -DCMAKE_CXX_STANDARD_REQUIRED=ON + -DJSON_BuildTests=ON + -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_gcc_cxx${CXX_STANDARD} + COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_gcc_cxx${CXX_STANDARD} + COMMAND cd ${PROJECT_BINARY_DIR}/build_gcc_cxx${CXX_STANDARD} && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure COMMENT "Compile and test with GCC for C++${CXX_STANDARD}" ) - add_custom_target(ci_test_clang_${CXX_STANDARD} - COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_${CXX_STANDARD} -DJSON_BuildTests=ON -DCMAKE_CXX_STANDARD=${CXX_STANDARD} -DCMAKE_CXX_STANDARD_REQUIRED=ON -GNinja - COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang_${CXX_STANDARD} - COMMAND cd 
${PROJECT_BINARY_DIR}/build_clang_${CXX_STANDARD} && ${CMAKE_CTEST_COMMAND} -j10 --output-on-failure + add_custom_target(ci_test_clang_cxx${CXX_STANDARD} + COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} + -DCMAKE_BUILD_TYPE=Debug -GNinja + -DCMAKE_CXX_STANDARD=${CXX_STANDARD} -DCMAKE_CXX_STANDARD_REQUIRED=ON + -DJSON_BuildTests=ON + -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_cxx${CXX_STANDARD} + COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang_cxx${CXX_STANDARD} + COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_cxx${CXX_STANDARD} && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure COMMENT "Compile and test with Clang for C++${CXX_STANDARD}" ) endforeach() @@ -390,9 +411,12 @@ endforeach() set(CLANG_CXX_FLAGS_SANITIZER "-g -O0 -fsanitize=address -fsanitize=undefined -fsanitize=integer -fsanitize=nullability -fno-omit-frame-pointer -fno-sanitize-recover=all -fsanitize-recover=unsigned-integer-overflow") add_custom_target(ci_test_clang_sanitizer - COMMAND CXX=${CLANG_TOOL} CXXFLAGS=${CLANG_CXX_FLAGS_SANITIZER} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_sanitizer -DJSON_BuildTests=ON -GNinja + COMMAND CXX=${CLANG_TOOL} CXXFLAGS=${CLANG_CXX_FLAGS_SANITIZER} ${CMAKE_COMMAND} + -DCMAKE_BUILD_TYPE=Debug -GNinja + -DJSON_BuildTests=ON + -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_sanitizer COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang_sanitizer - COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_sanitizer && ${CMAKE_CTEST_COMMAND} -j10 --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_sanitizer && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure COMMENT "Compile and test with sanitizers" ) @@ -401,9 +425,12 @@ add_custom_target(ci_test_clang_sanitizer ############################################################################### add_custom_target(ci_test_valgrind - COMMAND CXX=${GCC_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_valgrind -DJSON_BuildTests=ON -DJSON_Valgrind=ON -GNinja + COMMAND CXX=${GCC_TOOL} ${CMAKE_COMMAND} + -DCMAKE_BUILD_TYPE=Debug -GNinja + -DJSON_BuildTests=ON -DJSON_Valgrind=ON + -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_valgrind COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_valgrind - COMMAND cd ${PROJECT_BINARY_DIR}/build_valgrind && ${CMAKE_CTEST_COMMAND} -L valgrind -j10 --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_valgrind && ${CMAKE_CTEST_COMMAND} -L valgrind --parallel ${N} --output-on-failure COMMENT "Compile and test with Valgrind" ) @@ -414,7 +441,10 @@ add_custom_target(ci_test_valgrind set(CLANG_ANALYZER_CHECKS 
"fuchsia.HandleChecker,nullability.NullableDereferenced,nullability.NullablePassedToNonnull,nullability.NullableReturnedFromNonnull,optin.cplusplus.UninitializedObject,optin.cplusplus.VirtualCall,optin.mpi.MPI-Checker,optin.osx.OSObjectCStyleCast,optin.osx.cocoa.localizability.EmptyLocalizationContextChecker,optin.osx.cocoa.localizability.NonLocalizedStringChecker,optin.performance.GCDAntipattern,optin.performance.Padding,optin.portability.UnixAPI,security.FloatLoopCounter,security.insecureAPI.DeprecatedOrUnsafeBufferHandling,security.insecureAPI.bcmp,security.insecureAPI.bcopy,security.insecureAPI.bzero,security.insecureAPI.rand,security.insecureAPI.strcpy,valist.CopyToSelf,valist.Uninitialized,valist.Unterminated,webkit.NoUncountedMemberChecker,webkit.RefCntblBaseVirtualDtor,core.CallAndMessage,core.DivideZero,core.NonNullParamChecker,core.NullDereference,core.StackAddressEscape,core.UndefinedBinaryOperatorResult,core.VLASize,core.uninitialized.ArraySubscript,core.uninitialized.Assign,core.uninitialized.Branch,core.uninitialized.CapturedBlockVariable,core.uninitialized.UndefReturn,cplusplus.InnerPointer,cplusplus.Move,cplusplus.NewDelete,cplusplus.NewDeleteLeaks,cplusplus.PlacementNew,cplusplus.PureVirtualCall,deadcode.DeadStores,nullability.NullPassedToNonnull,nullability.NullReturnedFromNonnull,osx.API,osx.MIG,osx.NumberObjectConversion,osx.OSObjectRetainCount,osx.ObjCProperty,osx.SecKeychainAPI,osx.cocoa.AtSync,osx.cocoa.AutoreleaseWrite,osx.cocoa.ClassRelease,osx.cocoa.Dealloc,osx.cocoa.IncompatibleMethodTypes,osx.cocoa.Loops,osx.cocoa.MissingSuperCall,osx.cocoa.NSAutoreleasePool,osx.cocoa.NSError,osx.cocoa.NilArg,osx.cocoa.NonNilReturnValue,osx.cocoa.ObjCGenerics,osx.cocoa.RetainCount,osx.cocoa.RunLoopAutoreleaseLeak,osx.cocoa.SelfInit,osx.cocoa.SuperDealloc,osx.cocoa.UnusedIvars,osx.cocoa.VariadicMethodTypes,osx.coreFoundation.CFError,osx.coreFoundation.CFNumber,osx.coreFoundation.CFRetainRelease,osx.coreFoundation.containers.OutOfBounds,osx.coreFoundation.containers.PointerSizedValues,security.insecureAPI.UncheckedReturn,security.insecureAPI.decodeValueOfObjCType,security.insecureAPI.getpw,security.insecureAPI.gets,security.insecureAPI.mkstemp,security.insecureAPI.mktemp,security.insecureAPI.vfork,unix.API,unix.Malloc,unix.MallocSizeof,unix.MismatchedDeallocator,unix.Vfork,unix.cstring.BadSizeArg,unix.cstring.NullArg") add_custom_target(ci_clang_analyze - COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_analyze -DJSON_BuildTests=ON -GNinja + COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} + -DCMAKE_BUILD_TYPE=Debug -GNinja + -DJSON_BuildTests=ON + -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_analyze COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_analyze && ${SCAN_BUILD_TOOL} -enable-checker ${CLANG_ANALYZER_CHECKS} --use-c++=${CLANG_TOOL} -analyze-headers -o ${PROJECT_BINARY_DIR}/report ninja COMMENT "Check code with Clang Analyzer" ) @@ -449,7 +479,11 @@ add_executable(single_all ${PROJECT_BINARY_DIR}/src_single/all.cpp) target_compile_features(single_all PRIVATE cxx_std_11) add_custom_target(ci_oclint - COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_oclint -DJSON_BuildTests=OFF -DJSON_CI=ON -DCMAKE_EXPORT_COMPILE_COMMANDS=ON + COMMAND ${CMAKE_COMMAND} + -DCMAKE_BUILD_TYPE=Debug + -DCMAKE_EXPORT_COMPILE_COMMANDS=ON + -DJSON_BuildTests=OFF -DJSON_CI=ON + -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_oclint COMMAND ${OCLINT_TOOL} -i 
${PROJECT_BINARY_DIR}/build_oclint/src_single/all.cpp -p ${PROJECT_BINARY_DIR}/build_oclint -- -report-type html -enable-global-analysis -o oclint_report.html COMMENT "Check code with OCLint" ) @@ -459,7 +493,11 @@ add_custom_target(ci_oclint ############################################################################### add_custom_target(ci_clang_tidy - COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_tidy -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON -GNinja -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_CXX_CLANG_TIDY=clang-tidy + COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} + -DCMAKE_BUILD_TYPE=Debug -GNinja + -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_CXX_CLANG_TIDY=clang-tidy + -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON + -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_tidy COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang_tidy COMMENT "Check code with Clang-Tidy" ) @@ -469,7 +507,11 @@ add_custom_target(ci_clang_tidy ############################################################################### add_custom_target(ci_pvs_studio - COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_pvs_studio -DJSON_BuildTests=ON -DCMAKE_EXPORT_COMPILE_COMMANDS=ON + COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} + -DCMAKE_BUILD_TYPE=Debug + -DCMAKE_EXPORT_COMPILE_COMMANDS=ON + -DJSON_BuildTests=ON + -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_pvs_studio COMMAND cd ${PROJECT_BINARY_DIR}/build_pvs_studio && ${PVS_STUDIO_ANALYZER_TOOL} analyze -j 10 COMMAND cd ${PROJECT_BINARY_DIR}/build_pvs_studio && ${PLOG_CONVERTER_TOOL} -a'GA:1,2;64:1;CS' -t fullhtml PVS-Studio.log -o pvs COMMENT "Check code with PVS Studio" @@ -482,7 +524,7 @@ add_custom_target(ci_pvs_studio add_custom_target(ci_infer COMMAND mkdir -p ${PROJECT_BINARY_DIR}/build_infer COMMAND cd ${PROJECT_BINARY_DIR}/build_infer && ${INFER_TOOL} compile -- ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug ${PROJECT_SOURCE_DIR} -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON - COMMAND cd ${PROJECT_BINARY_DIR}/build_infer && ${INFER_TOOL} run -- make -j10 + COMMAND cd ${PROJECT_BINARY_DIR}/build_infer && ${INFER_TOOL} run -- make --parallel ${N} COMMENT "Check code with Infer" ) @@ -493,9 +535,12 @@ add_custom_target(ci_infer add_custom_target(ci_offline_testdata COMMAND mkdir -p ${PROJECT_BINARY_DIR}/build_offline_testdata/test_data COMMAND cd ${PROJECT_BINARY_DIR}/build_offline_testdata/test_data && ${GIT_TOOL} clone -c advice.detachedHead=false --branch v3.0.0 https://github.com/nlohmann/json_test_data.git --quiet --depth 1 - COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_offline_testdata -DJSON_BuildTests=ON -DJSON_TestDataDirectory=${PROJECT_BINARY_DIR}/build_offline_testdata/test_data/json_test_data -GNinja + COMMAND ${CMAKE_COMMAND} + -DCMAKE_BUILD_TYPE=Debug -GNinja + -DJSON_BuildTests=ON -DJSON_TestDataDirectory=${PROJECT_BINARY_DIR}/build_offline_testdata/test_data/json_test_data + -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_offline_testdata COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_offline_testdata - COMMAND cd ${PROJECT_BINARY_DIR}/build_offline_testdata && ${CMAKE_CTEST_COMMAND} -j10 --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_offline_testdata && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure COMMENT "Check code with previously downloaded test data" ) @@ 
-506,9 +551,12 @@ add_custom_target(ci_offline_testdata add_custom_target(ci_non_git_tests COMMAND mkdir -p ${PROJECT_BINARY_DIR}/build_non_git_tests/sources COMMAND cd ${PROJECT_SOURCE_DIR} && for FILE in `${GIT_TOOL} ls-tree --name-only HEAD`\; do cp -r $$FILE ${PROJECT_BINARY_DIR}/build_non_git_tests/sources \; done - COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_BINARY_DIR}/build_non_git_tests/sources -B${PROJECT_BINARY_DIR}/build_non_git_tests -DJSON_BuildTests=ON -GNinja + COMMAND ${CMAKE_COMMAND} + -DCMAKE_BUILD_TYPE=Debug -GNinja + -DJSON_BuildTests=ON + -S${PROJECT_BINARY_DIR}/build_non_git_tests/sources -B${PROJECT_BINARY_DIR}/build_non_git_tests COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_non_git_tests - COMMAND cd ${PROJECT_BINARY_DIR}/build_non_git_tests && ${CMAKE_CTEST_COMMAND} -j10 -LE git_required --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_non_git_tests && ${CMAKE_CTEST_COMMAND} --parallel ${N} -LE git_required --output-on-failure COMMENT "Check code when project was not checked out from Git" ) @@ -517,9 +565,12 @@ add_custom_target(ci_non_git_tests ############################################################################### add_custom_target(ci_reproducible_tests - COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_reproducible_tests -DJSON_BuildTests=ON -GNinja + COMMAND ${CMAKE_COMMAND} + -DCMAKE_BUILD_TYPE=Debug -GNinja + -DJSON_BuildTests=ON + -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_reproducible_tests COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_reproducible_tests - COMMAND cd ${PROJECT_BINARY_DIR}/build_reproducible_tests && ${CMAKE_CTEST_COMMAND} -j10 -LE not_reproducible --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_reproducible_tests && ${CMAKE_CTEST_COMMAND} --parallel ${N} -LE not_reproducible --output-on-failure COMMENT "Check code and exclude tests that change installed files" ) @@ -557,7 +608,9 @@ add_custom_target(ci_single_binaries ############################################################################### add_custom_target(ci_benchmarks - COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Release -S${PROJECT_SOURCE_DIR}/benchmarks -B${PROJECT_BINARY_DIR}/build_benchmarks + COMMAND ${CMAKE_COMMAND} + -DCMAKE_BUILD_TYPE=Release -GNinja + -S${PROJECT_SOURCE_DIR}/benchmarks -B${PROJECT_BINARY_DIR}/build_benchmarks COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_benchmarks --target json_benchmarks COMMAND cd ${PROJECT_BINARY_DIR}/build_benchmarks && ./json_benchmarks COMMENT "Run benchmarks" @@ -595,12 +648,18 @@ foreach(JSON_CMAKE_FLAG ${JSON_CMAKE_FLAGS}) string(TOLOWER "ci_cmake_flag_${JSON_CMAKE_FLAG}" JSON_CMAKE_FLAG_TARGET) add_custom_target("${JSON_CMAKE_FLAG_TARGET}" COMMENT "Check CMake flag ${JSON_CMAKE_FLAG} (CMake ${CMAKE_VERSION})" - COMMAND ${CMAKE_COMMAND} -Werror=dev -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET} -D${JSON_CMAKE_FLAG}=ON + COMMAND ${CMAKE_COMMAND} + -Werror=dev + -D${JSON_CMAKE_FLAG}=ON + -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET} ) add_custom_target("${JSON_CMAKE_FLAG_TARGET}_31" COMMENT "Check CMake flag ${JSON_CMAKE_FLAG} (CMake 3.1)" COMMAND mkdir ${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET}_31 - COMMAND cd ${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET}_31 && ${CMAKE_310_BINARY} -Werror=dev ${PROJECT_SOURCE_DIR} -D${JSON_CMAKE_FLAG}=ON -DCMAKE_CXX_COMPILE_FEATURES="cxx_range_for" 
-DCMAKE_CXX_FLAGS="-std=gnu++11" + COMMAND cd ${PROJECT_BINARY_DIR}/build_${JSON_CMAKE_FLAG_TARGET}_31 && ${CMAKE_310_BINARY} + -Werror=dev ${PROJECT_SOURCE_DIR} + -D${JSON_CMAKE_FLAG}=ON + -DCMAKE_CXX_COMPILE_FEATURES="cxx_range_for" -DCMAKE_CXX_FLAGS="-std=gnu++11" DEPENDS ${CMAKE_310_BINARY} ) list(APPEND JSON_CMAKE_FLAG_TARGETS ${JSON_CMAKE_FLAG_TARGET} ${JSON_CMAKE_FLAG_TARGET}_31) From 0a27d1cf594f11a91722b0c7290c6e041aaf7e84 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Wed, 27 Jan 2021 21:18:42 +0100 Subject: [PATCH 049/113] :construction_worker: add CI steps for cppcheck and cpplint --- .github/workflows/ubuntu.yml | 22 ++++++++++++++++--- Makefile | 2 ++ cmake/ci.cmake | 2 +- include/nlohmann/detail/json_pointer.hpp | 4 ++-- include/nlohmann/thirdparty/hedley/hedley.hpp | 2 ++ single_include/nlohmann/json.hpp | 6 +++-- 6 files changed, 30 insertions(+), 8 deletions(-) diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 10239fedf0..8036fba2a1 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -5,7 +5,6 @@ on: [push, pull_request] jobs: ci_test_clang_cxx20: runs-on: ubuntu-latest - steps: - uses: actions/checkout@v1 - name: install_clang @@ -20,7 +19,6 @@ jobs: ci_clang_analyze: runs-on: ubuntu-latest - steps: - uses: actions/checkout@v1 - name: install_ninja @@ -42,7 +40,6 @@ jobs: ci_test_clang: runs-on: ubuntu-latest - steps: - uses: actions/checkout@v1 - name: install_ninja @@ -81,3 +78,22 @@ jobs: run: cmake -S . -B build -DJSON_CI=On - name: build run: cmake --build build --target ci_test_valgrind + + ci_cppcheck: + runs-on: ubuntu-latest + container: nlohmann/json-ci:latest + steps: + - uses: actions/checkout@v2 + - name: cmake + run: cmake -S . -B build -DJSON_CI=On + - name: build + run: cmake --build build --target ci_cppcheck + + ci_cpplint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - name: cmake + run: cmake -S . 
-B build -DJSON_CI=On
+ - name: build
+ run: cmake --build build --target ci_cpplint
diff --git a/Makefile b/Makefile
index d3963f503e..655bb6c4ad 100644
--- a/Makefile
+++ b/Makefile
@@ -641,4 +641,6 @@ update_hedley:
 curl https://raw.githubusercontent.com/nemequ/hedley/master/hedley.h -o include/nlohmann/thirdparty/hedley/hedley.hpp
 $(SED) -i 's/HEDLEY_/JSON_HEDLEY_/g' include/nlohmann/thirdparty/hedley/hedley.hpp
 grep "[[:blank:]]*#[[:blank:]]*undef" include/nlohmann/thirdparty/hedley/hedley.hpp | grep -v "__" | sort | uniq | $(SED) 's/ //g' | $(SED) 's/undef/undef /g' > include/nlohmann/thirdparty/hedley/hedley_undef.hpp
+ $(SED) -i '1s/^/#pragma once\n\n/' include/nlohmann/thirdparty/hedley/hedley.hpp
+ $(SED) -i '1s/^/#pragma once\n\n/' include/nlohmann/thirdparty/hedley/hedley_undef.hpp
 $(MAKE) amalgamate
diff --git a/cmake/ci.cmake b/cmake/ci.cmake
index 0134328a51..bf276da992 100644
--- a/cmake/ci.cmake
+++ b/cmake/ci.cmake
@@ -463,7 +463,7 @@ add_custom_target(ci_cppcheck
 ###############################################################################
 add_custom_target(ci_cpplint
- COMMAND ${Python3_EXECUTABLE} ${CMAKE_SOURCE_DIR}/third_party/cpplint/cpplint.py --filter=-whitespace,-legal,-runtime/references,-runtime/explicit --quiet --recursive ${SRC_FILES}
+ COMMAND ${Python3_EXECUTABLE} ${CMAKE_SOURCE_DIR}/third_party/cpplint/cpplint.py --filter=-whitespace,-legal,-runtime/references,-runtime/explicit,-runtime/indentation_namespace,-readability/casting --quiet --recursive ${SRC_FILES}
 COMMENT "Check code with cpplint"
 )
diff --git a/include/nlohmann/detail/json_pointer.hpp b/include/nlohmann/detail/json_pointer.hpp
index 8c6bda1406..4209e676a6 100644
--- a/include/nlohmann/detail/json_pointer.hpp
+++ b/include/nlohmann/detail/json_pointer.hpp
@@ -349,7 +349,7 @@ class json_pointer
 }
 std::size_t processed_chars = 0;
- unsigned long long res = 0;
+ unsigned long long res = 0; // NOLINT(runtime/int)
 JSON_TRY
 {
 res = std::stoull(s, &processed_chars);
@@ -367,7 +367,7 @@
 // only triggered on special platforms (like 32bit), see also
 // https://github.com/nlohmann/json/pull/2203
- if (res >= static_cast<unsigned long long>((std::numeric_limits<size_type>::max)()))
+ if (res >= static_cast<unsigned long long>((std::numeric_limits<size_type>::max)())) // NOLINT(runtime/int)
 {
 JSON_THROW(detail::out_of_range::create(410, "array index " + s + " exceeds size_type")); // LCOV_EXCL_LINE
 }
diff --git a/include/nlohmann/thirdparty/hedley/hedley.hpp b/include/nlohmann/thirdparty/hedley/hedley.hpp
index c1fa16dbb6..9bc7630a6f 100644
--- a/include/nlohmann/thirdparty/hedley/hedley.hpp
+++ b/include/nlohmann/thirdparty/hedley/hedley.hpp
@@ -1,3 +1,5 @@
+#pragma once
+
 /* Hedley - https://nemequ.github.io/hedley
 * Created by Evan Nemerson
 *
diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp
index a2507ff630..ff669b4241 100644
--- a/single_include/nlohmann/json.hpp
+++ b/single_include/nlohmann/json.hpp
@@ -107,6 +107,8 @@ struct position_t
 #include <utility> // pair
 // #include <nlohmann/thirdparty/hedley/hedley.hpp>
+
+
 /* Hedley - https://nemequ.github.io/hedley
 * Created by Evan Nemerson
 *
@@ -12003,7 +12005,7 @@ class json_pointer
 }
 std::size_t processed_chars = 0;
- unsigned long long res = 0;
+ unsigned long long res = 0; // NOLINT(runtime/int)
 JSON_TRY
 {
 res = std::stoull(s, &processed_chars);
@@ -12021,7 +12023,7 @@
 // only triggered on special platforms (like 32bit), see also
 // https://github.com/nlohmann/json/pull/2203
- if (res >= static_cast<unsigned long long>((std::numeric_limits<size_type>::max)()))
+ if (res >= static_cast<unsigned long long>((std::numeric_limits<size_type>::max)())) // NOLINT(runtime/int)
 {
 JSON_THROW(detail::out_of_range::create(410, "array index " + s + " exceeds size_type")); // LCOV_EXCL_LINE
 }
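The two NOLINT(runtime/int) suppressions above are needed because std::stoull is specified to return unsigned long long, which is exactly the platform-dependent integer type that cpplint's runtime/int check warns about; the warning is suppressed rather than "fixed". A minimal, self-contained sketch of the same pattern (the function name parse_array_index and the exception messages are invented for illustration; this is not the library's json_pointer code):

    // Sketch only: mirrors the suppression pattern used in the patch above.
    #include <cstddef>   // std::size_t
    #include <limits>    // std::numeric_limits
    #include <stdexcept> // std::invalid_argument, std::out_of_range
    #include <string>    // std::stoull, std::string

    std::size_t parse_array_index(const std::string& s)
    {
        std::size_t processed_chars = 0;
        // std::stoull returns unsigned long long by definition, so the
        // platform-dependent integer type cannot be avoided here.
        unsigned long long res = std::stoull(s, &processed_chars); // NOLINT(runtime/int)

        if (processed_chars != s.size())
        {
            throw std::invalid_argument("not a number: " + s);
        }

        // only relevant where std::size_t is narrower than unsigned long long
        if (res >= static_cast<unsigned long long>((std::numeric_limits<std::size_t>::max)())) // NOLINT(runtime/int)
        {
            throw std::out_of_range("array index " + s + " exceeds size_type");
        }

        return static_cast<std::size_t>(res);
    }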
From cb28e76aa4fd03b9c0e1e4930bce78f21a6cd614 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Wed, 27 Jan 2021 22:48:04 +0100
Subject: [PATCH 050/113] :rotating_light: fix warnings from Clang-Tidy

---
 test/src/unit-alt-string.cpp | 20 ++++----
 test/src/unit-bson.cpp | 22 ++++-----
 test/src/unit-comparison.cpp | 8 ++--
 test/src/unit-constructor2.cpp | 6 +--
 test/src/unit-convenience.cpp | 4 +-
 test/src/unit-json_patch.cpp | 2 +-
 test/src/unit-json_pointer.cpp | 2 +-
 test/src/unit-msgpack.cpp | 36 +++++++--------
 test/src/unit-ordered_map.cpp | 1 +
 test/src/unit-readme.cpp | 2 +-
 test/src/unit-reference_access.cpp | 30 ++++++------
 test/src/unit-regression2.cpp | 14 +++---
 test/src/unit-to_chars.cpp | 2 +-
 test/src/unit-udt.cpp | 74 +++++++++++++++---------------
 test/src/unit-udt_macro.cpp | 6 +--
 test/src/unit-unicode.cpp | 8 ++--
 test/src/unit-wstring.cpp | 2 +-
 17 files changed, 120 insertions(+), 119 deletions(-)

diff --git a/test/src/unit-alt-string.cpp b/test/src/unit-alt-string.cpp
index b51a5a91d3..b526ae20f0 100644
--- a/test/src/unit-alt-string.cpp
+++ b/test/src/unit-alt-string.cpp
@@ -152,7 +152,7 @@ class alt_string
 private:
 std::string str_impl {};
- friend bool ::operator<(const char*, const alt_string&);
+ friend bool ::operator<(const char* /*op1*/, const alt_string& /*op2*/);
 };
 void int_to_string(alt_string& target, std::size_t value)
@@ -233,24 +233,24 @@ TEST_CASE("alternative string type")
 SECTION("parse")
 {
- auto doc = alt_json::parse("{\"foo\": \"bar\"}");
+ auto doc = alt_json::parse(R"({"foo": "bar"})");
 alt_string dump = doc.dump();
 CHECK(dump == R"({"foo":"bar"})");
 }
 SECTION("items")
 {
- auto doc = alt_json::parse("{\"foo\": \"bar\"}");
+ auto doc = alt_json::parse(R"({"foo": "bar"})");
- for ( auto item : doc.items() )
+ for (const auto& item : doc.items())
 {
- CHECK( item.key() == "foo" );
- CHECK( item.value() == "bar" );
+ CHECK(item.key() == "foo");
+ CHECK(item.value() == "bar");
 }
- auto doc_array = alt_json::parse("[\"foo\", \"bar\"]");
+ auto doc_array = alt_json::parse(R"(["foo", "bar"])");
- for ( auto item : doc_array.items() )
+ for (const auto& item : doc_array.items())
 {
 if (item.key() == "0" )
 {
@@ -258,11 +258,11 @@ TEST_CASE("alternative string type")
 }
 else if (item.key() == "1" )
 {
- CHECK( item.value() == "bar" );
+ CHECK(item.value() == "bar");
 }
 else
 {
- CHECK( false );
+ CHECK(false);
 }
 }
 }
diff --git a/test/src/unit-bson.cpp b/test/src/unit-bson.cpp
index 3be72c7d47..e0e020de0a 100644
--- a/test/src/unit-bson.cpp
+++ b/test/src/unit-bson.cpp
@@ -683,42 +683,42 @@ class SaxCountdown
 return events_left-- > 0;
 }
- bool boolean(bool)
+ bool boolean(bool /*unused*/)
 {
 return events_left-- > 0;
 }
- bool number_integer(json::number_integer_t)
+ bool number_integer(json::number_integer_t /*unused*/)
 {
 return events_left-- > 0;
 }
- bool number_unsigned(json::number_unsigned_t)
+ bool number_unsigned(json::number_unsigned_t /*unused*/)
 {
 return events_left-- > 0;
 }
- bool number_float(json::number_float_t, const std::string&)
+ bool number_float(json::number_float_t /*unused*/, const std::string& /*unused*/)
 {
 return events_left-- > 0;
 }
- bool string(std::string&)
+ bool string(std::string& /*unused*/)
 {
 return events_left-- > 0;
 }
- bool binary(std::vector<std::uint8_t>&)
+ bool binary(std::vector<std::uint8_t>& /*unused*/)
 {
 return events_left-- > 0;
 }
- bool start_object(std::size_t)
+ bool start_object(std::size_t /*unused*/)
 {
return events_left-- > 0; } - bool key(std::string&) + bool key(std::string& /*unused*/) { return events_left-- > 0; } @@ -728,7 +728,7 @@ class SaxCountdown return events_left-- > 0; } - bool start_array(std::size_t) + bool start_array(std::size_t /*unused*/) { return events_left-- > 0; } @@ -738,7 +738,7 @@ class SaxCountdown return events_left-- > 0; } - bool parse_error(std::size_t, const std::string&, const json::exception&) + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const json::exception& /*unused*/) { return false; } @@ -746,7 +746,7 @@ class SaxCountdown private: int events_left = 0; }; -} +} // namespace TEST_CASE("Incomplete BSON Input") { diff --git a/test/src/unit-comparison.cpp b/test/src/unit-comparison.cpp index e375feca49..56b38cf6b0 100644 --- a/test/src/unit-comparison.cpp +++ b/test/src/unit-comparison.cpp @@ -41,7 +41,7 @@ bool f(A a, B b, U u = U()) { return u(a, b); } -} +} // namespace TEST_CASE("lexicographical comparison operators") { @@ -143,10 +143,10 @@ TEST_CASE("lexicographical comparison operators") // comparison with discarded elements json j_discarded(json::value_t::discarded); - for (size_t i = 0; i < j_values.size(); ++i) + for (const auto& v : j_values) { - CHECK( (j_values[i] == j_discarded) == false); - CHECK( (j_discarded == j_values[i]) == false); + CHECK( (v == j_discarded) == false); + CHECK( (j_discarded == v) == false); CHECK( (j_discarded == j_discarded) == false); } diff --git a/test/src/unit-constructor2.cpp b/test/src/unit-constructor2.cpp index 27f4dfdcbc..a32ad2eefc 100644 --- a/test/src/unit-constructor2.cpp +++ b/test/src/unit-constructor2.cpp @@ -188,19 +188,19 @@ TEST_CASE("other constructors and destructor") { SECTION("object") { - auto j = new json {{"foo", 1}, {"bar", false}}; + auto* j = new json {{"foo", 1}, {"bar", false}}; delete j; } SECTION("array") { - auto j = new json {"foo", 1, 1u, false, 23.42}; + auto* j = new json {"foo", 1, 1u, false, 23.42}; delete j; } SECTION("string") { - auto j = new json("Hello world"); + auto* j = new json("Hello world"); delete j; } } diff --git a/test/src/unit-convenience.cpp b/test/src/unit-convenience.cpp index c75edac4ea..1d98cd8bec 100644 --- a/test/src/unit-convenience.cpp +++ b/test/src/unit-convenience.cpp @@ -37,7 +37,7 @@ using nlohmann::json; namespace { -void check_escaped(const char* original, const char* escaped = "", const bool ensure_ascii = false); +void check_escaped(const char* original, const char* escaped = "", bool ensure_ascii = false); void check_escaped(const char* original, const char* escaped, const bool ensure_ascii) { std::stringstream ss; @@ -45,7 +45,7 @@ void check_escaped(const char* original, const char* escaped, const bool ensure_ s.dump_escaped(original, ensure_ascii); CHECK(ss.str() == escaped); } -} +} // namespace TEST_CASE("convenience functions") { diff --git a/test/src/unit-json_patch.cpp b/test/src/unit-json_patch.cpp index 2ad7aadb82..a5deef548b 100644 --- a/test/src/unit-json_patch.cpp +++ b/test/src/unit-json_patch.cpp @@ -1258,7 +1258,7 @@ TEST_CASE("JSON patch") SECTION("Tests from github.com/json-patch/json-patch-tests") { - for (auto filename : + for (const auto* filename : { TEST_DATA_DIRECTORY "/json-patch-tests/spec_tests.json", TEST_DATA_DIRECTORY "/json-patch-tests/tests.json" diff --git a/test/src/unit-json_pointer.cpp b/test/src/unit-json_pointer.cpp index 14d8cd1830..9894cef45d 100644 --- a/test/src/unit-json_pointer.cpp +++ b/test/src/unit-json_pointer.cpp @@ -527,7 +527,7 @@ TEST_CASE("JSON pointers") 
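The SaxCountdown helper that reappears across these test files is a small fault-injection device: every SAX callback decrements a shared counter and returns false once it reaches zero, so each early-return path in the parser can be triggered deterministically. A hedged usage sketch, assuming a SaxCountdown like the ones in the patch above is in scope (the helper name check_all_abort_points is invented):

    #include <cassert>
    #include <string>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    // Abort the parse after n events, for every n smaller than the number of
    // events the document produces; sax_parse reports the abort by returning false.
    void check_all_abort_points(const std::string& document, int total_events)
    {
        for (int n = 0; n < total_events; ++n)
        {
            SaxCountdown sax(n);
            assert(!json::sax_parse(document, &sax));
        }
    }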
SECTION("string representation") { - for (auto ptr : + for (const auto* ptr : {"", "/foo", "/foo/0", "/", "/a~1b", "/c%d", "/e^f", "/g|h", "/i\\j", "/k\"l", "/ ", "/m~0n" }) { diff --git a/test/src/unit-msgpack.cpp b/test/src/unit-msgpack.cpp index e49a4203c3..d3c60cf9ad 100644 --- a/test/src/unit-msgpack.cpp +++ b/test/src/unit-msgpack.cpp @@ -52,42 +52,42 @@ class SaxCountdown return events_left-- > 0; } - bool boolean(bool) + bool boolean(bool /*unused*/) { return events_left-- > 0; } - bool number_integer(json::number_integer_t) + bool number_integer(json::number_integer_t /*unused*/) { return events_left-- > 0; } - bool number_unsigned(json::number_unsigned_t) + bool number_unsigned(json::number_unsigned_t /*unused*/) { return events_left-- > 0; } - bool number_float(json::number_float_t, const std::string&) + bool number_float(json::number_float_t /*unused*/, const std::string& /*unused*/) { return events_left-- > 0; } - bool string(std::string&) + bool string(std::string& /*unused*/) { return events_left-- > 0; } - bool binary(std::vector&) + bool binary(std::vector& /*unused*/) { return events_left-- > 0; } - bool start_object(std::size_t) + bool start_object(std::size_t /*unused*/) { return events_left-- > 0; } - bool key(std::string&) + bool key(std::string& /*unused*/) { return events_left-- > 0; } @@ -97,7 +97,7 @@ class SaxCountdown return events_left-- > 0; } - bool start_array(std::size_t) + bool start_array(std::size_t /*unused*/) { return events_left-- > 0; } @@ -107,7 +107,7 @@ class SaxCountdown return events_left-- > 0; } - bool parse_error(std::size_t, const std::string&, const json::exception&) + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const json::exception& /*unused*/) { return false; } @@ -258,7 +258,7 @@ TEST_CASE("MessagePack") // check individual bytes CHECK(result[0] == 0xcc); - uint8_t restored = static_cast(result[1]); + auto restored = static_cast(result[1]); CHECK(restored == i); // roundtrip @@ -293,7 +293,7 @@ TEST_CASE("MessagePack") // check individual bytes CHECK(result[0] == 0xcd); - uint16_t restored = static_cast(static_cast(result[1]) * 256 + static_cast(result[2])); + auto restored = static_cast(static_cast(result[1]) * 256 + static_cast(result[2])); CHECK(restored == i); // roundtrip @@ -436,7 +436,7 @@ TEST_CASE("MessagePack") const auto result = json::to_msgpack(j); CHECK(result == expected); - int16_t restored = static_cast((result[1] << 8) + result[2]); + auto restored = static_cast((result[1] << 8) + result[2]); CHECK(restored == -9263); // roundtrip @@ -469,7 +469,7 @@ TEST_CASE("MessagePack") // check individual bytes CHECK(result[0] == 0xd1); - int16_t restored = static_cast((result[1] << 8) + result[2]); + auto restored = static_cast((result[1] << 8) + result[2]); CHECK(restored == i); // roundtrip @@ -630,7 +630,7 @@ TEST_CASE("MessagePack") // check individual bytes CHECK(result[0] == 0xcc); - uint8_t restored = static_cast(result[1]); + auto restored = static_cast(result[1]); CHECK(restored == i); // roundtrip @@ -664,7 +664,7 @@ TEST_CASE("MessagePack") // check individual bytes CHECK(result[0] == 0xcd); - uint16_t restored = static_cast(static_cast(result[1]) * 256 + static_cast(result[2])); + auto restored = static_cast(static_cast(result[1]) * 256 + static_cast(result[2])); CHECK(restored == i); // roundtrip @@ -1088,7 +1088,7 @@ TEST_CASE("MessagePack") SECTION("{\"a\": {\"b\": {\"c\": {}}}}") { - json j = json::parse("{\"a\": {\"b\": {\"c\": {}}}}"); + json j = json::parse(R"({"a": {"b": {"c": 
{}}}})"); std::vector expected = { 0x81, 0xa1, 0x61, 0x81, 0xa1, 0x62, 0x81, 0xa1, 0x63, 0x80 @@ -1870,7 +1870,7 @@ TEST_CASE("MessagePack roundtrips" * doctest::skip()) // parse MessagePack file auto packed = utils::read_binary_file(filename + ".msgpack"); - if (!exclude_packed.count(filename)) + if (exclude_packed.count(filename) == 0u) { { INFO_WITH_TEMP(filename + ": output adapters: std::vector"); diff --git a/test/src/unit-ordered_map.cpp b/test/src/unit-ordered_map.cpp index 645183d8c4..47d049de66 100644 --- a/test/src/unit-ordered_map.cpp +++ b/test/src/unit-ordered_map.cpp @@ -49,6 +49,7 @@ TEST_CASE("ordered_map") std::map m {{"eins", "one"}, {"zwei", "two"}, {"drei", "three"}}; ordered_map om(m.begin(), m.end()); const auto com = om; + om.clear(); // silence a warning by forbidding having "const auto& com = om;" CHECK(com.size() == 3); } } diff --git a/test/src/unit-readme.cpp b/test/src/unit-readme.cpp index ed2a4dcee4..3bf7339484 100644 --- a/test/src/unit-readme.cpp +++ b/test/src/unit-readme.cpp @@ -52,7 +52,7 @@ TEST_CASE("README" * doctest::skip()) { { // redirect std::cout for the README file - auto old_cout_buffer = std::cout.rdbuf(); + auto* old_cout_buffer = std::cout.rdbuf(); std::ostringstream new_stream; std::cout.rdbuf(new_stream.rdbuf()); { diff --git a/test/src/unit-reference_access.cpp b/test/src/unit-reference_access.cpp index c983677658..125c7ea4f0 100644 --- a/test/src/unit-reference_access.cpp +++ b/test/src/unit-reference_access.cpp @@ -56,11 +56,11 @@ TEST_CASE("reference access") json value = {{"one", 1}, {"two", 2}}; // check if references are returned correctly - test_type& p1 = value.get_ref(); + auto& p1 = value.get_ref(); CHECK(&p1 == value.get_ptr()); CHECK(p1 == value.get()); - const test_type& p2 = value.get_ref(); + const auto& p2 = value.get_ref(); CHECK(&p2 == value.get_ptr()); CHECK(p2 == value.get()); @@ -95,7 +95,7 @@ TEST_CASE("reference access") // test_type& p1 = value.get_ref(); // check if references are returned correctly - const test_type& p2 = value.get_ref(); + const auto& p2 = value.get_ref(); CHECK(&p2 == value.get_ptr()); CHECK(p2 == value.get()); } @@ -106,11 +106,11 @@ TEST_CASE("reference access") json value = {1, 2, 3, 4}; // check if references are returned correctly - test_type& p1 = value.get_ref(); + auto& p1 = value.get_ref(); CHECK(&p1 == value.get_ptr()); CHECK(p1 == value.get()); - const test_type& p2 = value.get_ref(); + const auto& p2 = value.get_ref(); CHECK(&p2 == value.get_ptr()); CHECK(p2 == value.get()); @@ -142,11 +142,11 @@ TEST_CASE("reference access") json value = "hello"; // check if references are returned correctly - test_type& p1 = value.get_ref(); + auto& p1 = value.get_ref(); CHECK(&p1 == value.get_ptr()); CHECK(p1 == value.get()); - const test_type& p2 = value.get_ref(); + const auto& p2 = value.get_ref(); CHECK(&p2 == value.get_ptr()); CHECK(p2 == value.get()); @@ -178,11 +178,11 @@ TEST_CASE("reference access") json value = false; // check if references are returned correctly - test_type& p1 = value.get_ref(); + auto& p1 = value.get_ref(); CHECK(&p1 == value.get_ptr()); CHECK(p1 == value.get()); - const test_type& p2 = value.get_ref(); + const auto& p2 = value.get_ref(); CHECK(&p2 == value.get_ptr()); CHECK(p2 == value.get()); @@ -214,11 +214,11 @@ TEST_CASE("reference access") json value = -23; // check if references are returned correctly - test_type& p1 = value.get_ref(); + auto& p1 = value.get_ref(); CHECK(&p1 == value.get_ptr()); CHECK(p1 == value.get()); - const test_type& p2 = 
value.get_ref(); + const auto& p2 = value.get_ref(); CHECK(&p2 == value.get_ptr()); CHECK(p2 == value.get()); @@ -250,11 +250,11 @@ TEST_CASE("reference access") json value = 23u; // check if references are returned correctly - test_type& p1 = value.get_ref(); + auto& p1 = value.get_ref(); CHECK(&p1 == value.get_ptr()); CHECK(p1 == value.get()); - const test_type& p2 = value.get_ref(); + const auto& p2 = value.get_ref(); CHECK(&p2 == value.get_ptr()); CHECK(p2 == value.get()); @@ -286,11 +286,11 @@ TEST_CASE("reference access") json value = 42.23; // check if references are returned correctly - test_type& p1 = value.get_ref(); + auto& p1 = value.get_ref(); CHECK(&p1 == value.get_ptr()); CHECK(p1 == value.get()); - const test_type& p2 = value.get_ref(); + const auto& p2 = value.get_ref(); CHECK(&p2 == value.get_ptr()); CHECK(p2 == value.get()); diff --git a/test/src/unit-regression2.cpp b/test/src/unit-regression2.cpp index 107d16b8cb..5ea8de63d1 100644 --- a/test/src/unit-regression2.cpp +++ b/test/src/unit-regression2.cpp @@ -122,7 +122,7 @@ struct adl_serializer return {}; } }; -} +} // namespace nlohmann ///////////////////////////////////////////////////////////////////// // for #1805 @@ -139,7 +139,7 @@ TEST_CASE("regression tests 2") { SECTION("issue #1001 - Fix memory leak during parser callback") { - auto geojsonExample = R"( + const auto* geojsonExample = R"( { "type": "FeatureCollection", "features": [ { "type": "Feature", @@ -174,7 +174,7 @@ TEST_CASE("regression tests 2") ] })"; - json::parser_callback_t cb = [&](int, json::parse_event_t event, json & parsed) + json::parser_callback_t cb = [&](int /*level*/, json::parse_event_t event, json & parsed) { // skip uninteresting events if (event == json::parse_event_t::value && !parsed.is_primitive()) @@ -290,7 +290,7 @@ TEST_CASE("regression tests 2") json dump_test; dump_test["1"] = std::string(length, -1); - std::string expected = "{\"1\":\""; + std::string expected = R"({"1":")"; for (int i = 0; i < length; ++i) { expected += "\\ufffd"; @@ -307,7 +307,7 @@ TEST_CASE("regression tests 2") json dump_test; dump_test["1"] = std::string(length, -2); - std::string expected = "{\"1\":\""; + std::string expected = R"({"1":")"; for (int i = 0; i < length; ++i) { expected += "\xEF\xBF\xBD"; @@ -340,9 +340,9 @@ TEST_CASE("regression tests 2") -54, -28, -26 }; std::string s; - for (unsigned i = 0; i < sizeof(data) / sizeof(int); i++) + for (int i : data) { - s += static_cast(data[i]); + s += static_cast(i); } dump_test["1"] = s; dump_test.dump(-1, ' ', true, nlohmann::json::error_handler_t::replace); diff --git a/test/src/unit-to_chars.cpp b/test/src/unit-to_chars.cpp index 2861928f8f..b94b087559 100644 --- a/test/src/unit-to_chars.cpp +++ b/test/src/unit-to_chars.cpp @@ -38,7 +38,7 @@ using nlohmann::detail::dtoa_impl::reinterpret_bits; namespace { -static float make_float(uint32_t sign_bit, uint32_t biased_exponent, uint32_t significand) +float make_float(uint32_t sign_bit, uint32_t biased_exponent, uint32_t significand) { assert(sign_bit == 0 || sign_bit == 1); assert(biased_exponent <= 0xFF); diff --git a/test/src/unit-udt.cpp b/test/src/unit-udt.cpp index b333d0b65f..be7827df35 100644 --- a/test/src/unit-udt.cpp +++ b/test/src/unit-udt.cpp @@ -34,8 +34,10 @@ using nlohmann::json; #include #include +#include #include #include +#include namespace udt { @@ -55,40 +57,40 @@ struct age struct name { std::string m_val; - name(const std::string rhs = "") : m_val(rhs) {} + name(std::string rhs = "") : m_val(std::move(rhs)) {} }; struct address 
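The #1001 regression above exercises json::parser_callback_t, which lets a callback veto individual parse events so the corresponding values never end up in the parse result. A small self-contained sketch of that filtering behavior (the document and the "drop me" predicate are invented for illustration; only the public parse API is assumed):

    #include <iostream>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json::parser_callback_t cb = [](int /*depth*/, json::parse_event_t event, json& parsed)
        {
            // discard every value equal to "drop me"; its object key is removed as well
            return !(event == json::parse_event_t::value && parsed == json("drop me"));
        };

        json j = json::parse(R"({"keep": 1, "gone": "drop me"})", cb);
        std::cout << j.dump() << '\n'; // prints {"keep":1}
        return 0;
    }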
{
 std::string m_val;
- address(const std::string rhs = "") : m_val(rhs) {}
+ address(std::string rhs = "") : m_val(std::move(rhs)) {}
};

struct person
{
 age m_age;
 name m_name;
- country m_country;
- person() : m_age(), m_name(), m_country() {}
- person(const age& a, const name& n, const country& c) : m_age(a), m_name(n), m_country(c) {}
+ country m_country{};
+ person() = default;
+ person(const age& a, name n, const country& c) : m_age(a), m_name(std::move(n)), m_country(c) {}
};

struct contact
{
 person m_person;
 address m_address;
- contact() : m_person(), m_address() {}
- contact(const person& p, const address& a) : m_person(p), m_address(a) {}
+ contact() = default;
+ contact(person p, address a) : m_person(std::move(p)), m_address(std::move(a)) {}
};

struct contact_book
{
 name m_book_name;
 std::vector<contact> m_contacts;
- contact_book() : m_book_name(), m_contacts() {}
- contact_book(const name& n, const std::vector<contact>& c) : m_book_name(n), m_contacts(c) {}
+ contact_book() = default;
+ contact_book(name n, std::vector<contact> c) : m_book_name(std::move(n)), m_contacts(std::move(c)) {}
};
-}
+} // namespace udt

// to_json methods
namespace udt
@@ -178,7 +180,7 @@ static bool operator==(const contact_book& lhs, const contact_book& rhs)
 return std::tie(lhs.m_book_name, lhs.m_contacts) == std::tie(rhs.m_book_name, rhs.m_contacts);
 }
-}
+} // namespace udt

// from_json methods
namespace udt
@@ -207,7 +209,7 @@ static void from_json(const BasicJsonType& j, country& c)
 };
 const auto it = m.find(str);
- // TODO test exceptions
+ // TODO(nlohmann) test exceptions
 c = it->second;
 }
@@ -235,7 +237,7 @@ static void from_json(const nlohmann::json& j, contact_book& cb)
 cb.m_book_name = j["name"].get<name>();
 cb.m_contacts = j["contacts"].get<std::vector<contact>>();
 }
-}
+} // namespace udt

TEST_CASE("basic usage" * doctest::test_suite("udt"))
{
@@ -345,10 +347,10 @@ namespace udt
struct legacy_type
{
 std::string number;
- legacy_type() : number() {}
- legacy_type(const std::string& n) : number(n) {}
+ legacy_type() = default;
+ legacy_type(std::string n) : number(std::move(n)) {}
};
-}
+} // namespace udt

namespace nlohmann
{
@@ -393,7 +395,7 @@ struct adl_serializer<udt::legacy_type>
 l.number = std::to_string(j.get<int>());
 }
};
-}
+} // namespace nlohmann

TEST_CASE("adl_serializer specialization" * doctest::test_suite("udt"))
{
@@ -453,23 +455,23 @@ template <>
struct adl_serializer<std::vector<float>>
{
 using type = std::vector<float>;
- static void to_json(json& j, const type&)
+ static void to_json(json& j, const type& /*type*/)
 {
 j = "hijacked!";
 }

- static void from_json(const json&, type& opt)
+ static void from_json(const json& /*unnamed*/, type& opt)
 {
 opt = {42.0, 42.0, 42.0};
 }

 // preferred version
- static type from_json(const json&)
+ static type from_json(const json& /*unnamed*/)
 {
 return {4.0, 5.0, 6.0};
 }
};
-}
+} // namespace nlohmann

TEST_CASE("even supported types can be specialized" * doctest::test_suite("udt"))
{
@@ -504,13 +506,11 @@ struct adl_serializer<std::unique_ptr<T>>
 {
 return nullptr;
 }
- else
- {
- return std::unique_ptr<T>(new T(j.get<T>()));
- }
+
+ return std::unique_ptr<T>(new T(j.get<T>()));
 }
};
-}
+} // namespace nlohmann

TEST_CASE("Non-copyable types" * doctest::test_suite("udt"))
{
@@ -566,8 +566,8 @@ struct pod_serializer
 std::is_pod<U>::value && std::is_class<U>::value, int >::type = 0 >
 static void from_json(const BasicJsonType& j, U& t)
 {
- std::uint64_t value;
- // TODO The following block is no longer relevant in this serializer, make another one that shows the issue
+ std::uint64_t value = 0;
+ // The following block is no longer relevant in this serializer, make another one
that shows the issue // the problem arises only when one from_json method is defined without any constraint // // Why cannot we simply use: j.get() ? @@ -582,7 +582,7 @@ struct pod_serializer // calling get calls from_json, for now, we cannot do this in custom // serializers nlohmann::from_json(j, value); - auto bytes = static_cast(static_cast(&value)); + auto* bytes = static_cast(static_cast(&value)); std::memcpy(&t, bytes, sizeof(value)); } @@ -601,8 +601,8 @@ struct pod_serializer std::is_pod::value && std::is_class::value, int >::type = 0 > static void to_json(BasicJsonType& j, const T& t) noexcept { - auto bytes = static_cast< const unsigned char*>(static_cast(&t)); - std::uint64_t value; + const auto* bytes = static_cast< const unsigned char*>(static_cast(&t)); + std::uint64_t value = 0; std::memcpy(&value, bytes, sizeof(value)); nlohmann::to_json(j, value); } @@ -620,8 +620,8 @@ struct small_pod struct non_pod { std::string s; - non_pod() : s() {} - non_pod(const std::string& S) : s(S) {} + non_pod() = default; + non_pod(std::string S) : s(std::move(S)) {} }; template @@ -651,7 +651,7 @@ static std::ostream& operator<<(std::ostream& os, small_pod l) { return os << "begin: " << l.begin << ", middle: " << l.middle << ", end: " << l.end; } -} +} // namespace udt TEST_CASE("custom serializer for pods" * doctest::test_suite("udt")) { @@ -803,7 +803,7 @@ struct is_constructible_patched : std::false_type {}; template struct is_constructible_patched())))> : std::true_type {}; -} +} // namespace TEST_CASE("an incomplete type does not trigger a compiler error in non-evaluated context" * doctest::test_suite("udt")) { @@ -822,8 +822,8 @@ class Evil int m_i = 0; }; -void from_json(const json&, Evil&) {} -} +void from_json(const json& /*unused*/, Evil& /*unused*/) {} +} // namespace TEST_CASE("Issue #924") { diff --git a/test/src/unit-udt_macro.cpp b/test/src/unit-udt_macro.cpp index a13ac006b3..3343055718 100644 --- a/test/src/unit-udt_macro.cpp +++ b/test/src/unit-udt_macro.cpp @@ -39,7 +39,7 @@ namespace persons class person_with_private_data { private: - std::string name = ""; + std::string name; int age = 0; json metadata = nullptr; @@ -62,7 +62,7 @@ class person_with_private_data class person_without_private_data_1 { public: - std::string name = ""; + std::string name; int age = 0; json metadata = nullptr; @@ -84,7 +84,7 @@ class person_without_private_data_1 class person_without_private_data_2 { public: - std::string name = ""; + std::string name; int age = 0; json metadata = nullptr; diff --git a/test/src/unit-unicode.cpp b/test/src/unit-unicode.cpp index acaca2888d..0ffcb76dd2 100644 --- a/test/src/unit-unicode.cpp +++ b/test/src/unit-unicode.cpp @@ -168,7 +168,7 @@ void check_utf8string(bool success_expected, int byte1, int byte2 = -1, int byte CHECK_THROWS_AS(_ = json::parse(json_string), json::parse_error&); } } -} +} // namespace TEST_CASE("Unicode" * doctest::skip()) { @@ -1159,7 +1159,7 @@ TEST_CASE("Unicode" * doctest::skip()) SECTION("check JSON Pointers") { - for (auto s : j) + for (const auto& s : j) { // skip non-string JSON values if (!s.is_string()) @@ -1176,7 +1176,7 @@ TEST_CASE("Unicode" * doctest::skip()) } // JSON Pointers must begin with "/" - ptr = "/" + ptr; + ptr.insert(0, "/"); CHECK_NOTHROW(json::json_pointer("/" + ptr)); @@ -1256,7 +1256,7 @@ void roundtrip(bool success_expected, const std::string& s) CHECK_THROWS_AS(_ = json::parse(ps), json::parse_error&); } } -} +} // namespace TEST_CASE("Markus Kuhn's UTF-8 decoder capability and stress test") { diff --git 
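The udt test file patched above revolves around the to_json/from_json extension points that nlohmann::json locates via argument-dependent lookup. A minimal sketch of that protocol for a new type (the coordinate struct and its namespace are invented; only the public get/get_to API is assumed):

    #include <nlohmann/json.hpp>

    using nlohmann::json;

    namespace udt_example
    {
    struct coordinate
    {
        double x = 0.0;
        double y = 0.0;
    };

    // found via ADL when a coordinate is assigned to a json value
    void to_json(json& j, const coordinate& c)
    {
        j = json {{"x", c.x}, {"y", c.y}};
    }

    // found via ADL by json::get<coordinate>() and get_to()
    void from_json(const json& j, coordinate& c)
    {
        j.at("x").get_to(c.x);
        j.at("y").get_to(c.y);
    }
    } // namespace udt_example

    int main()
    {
        const udt_example::coordinate c {1.5, -2.0};
        const json j = c;
        const auto back = j.get<udt_example::coordinate>();
        return (back.x == c.x && back.y == c.y) ? 0 : 1;
    }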
a/test/src/unit-wstring.cpp b/test/src/unit-wstring.cpp index a5b80be742..b8ee2eaeb2 100644 --- a/test/src/unit-wstring.cpp +++ b/test/src/unit-wstring.cpp @@ -51,7 +51,7 @@ bool u32string_is_utf32() { return (std::u32string(U"πŸ’©") == std::u32string(U"\U0001F4A9")); } -} +} // namespace TEST_CASE("wide strings") { From 87d1dc7ed24f6901706cbbe59650a1fc612ac809 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Wed, 27 Jan 2021 22:48:20 +0100 Subject: [PATCH 051/113] :construction_worker: add CI steps for Clang-Tidy --- .clang-tidy | 9 ++++++++- .github/workflows/ubuntu.yml | 21 +++++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/.clang-tidy b/.clang-tidy index 046d84f870..395647e8b6 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1,22 +1,29 @@ Checks: '*, -cppcoreguidelines-avoid-goto, -cppcoreguidelines-avoid-magic-numbers, + -cppcoreguidelines-avoid-non-const-global-variables, -cppcoreguidelines-macro-usage, + -cppcoreguidelines-pro-type-union-access, -fuchsia-default-arguments-calls, -fuchsia-default-arguments-declarations, -fuchsia-overloaded-operator, -google-explicit-constructor, + -google-readability-function-size, -google-runtime-references, -hicpp-avoid-goto, -hicpp-explicit-conversions, + -hicpp-function-size, -hicpp-no-array-decay, + -hicpp-no-assembler, -hicpp-uppercase-literal-suffix, -llvm-header-guard, -llvm-include-order, -misc-non-private-member-variables-in-classes, -modernize-use-trailing-return-type, + -readability-function-size, -readability-magic-numbers, - -readability-uppercase-literal-suffix' + -readability-uppercase-literal-suffix, + -llvmlibc-*' CheckOptions: - key: hicpp-special-member-functions.AllowSoleDefaultDtor diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 8036fba2a1..10c6fbf26b 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -97,3 +97,24 @@ jobs: run: cmake -S . -B build -DJSON_CI=On - name: build run: cmake --build build --target ci_cpplint + + ci_clang_tidy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - name: install_ninja + run: | + sudo apt update + sudo apt install ninja-build + shell: bash + - name: install_clang + run: | + wget https://apt.llvm.org/llvm.sh + chmod +x llvm.sh + sudo ./llvm.sh 11 + sudo apt-get install clang-tools-11 + shell: bash + - name: cmake + run: cmake -S . -B build -DJSON_CI=On + - name: build + run: cmake --build build --target ci_clang_tidy || true From 81c8ded93c9a62c7798c4b6d819941b8e0a7f784 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Wed, 27 Jan 2021 22:53:52 +0100 Subject: [PATCH 052/113] :construction_worker: add CI steps for Clang-Tidy --- .github/workflows/macos.yml | 2 +- .github/workflows/ubuntu.yml | 23 ++++++----------------- .github/workflows/windows.yml | 8 ++++---- 3 files changed, 11 insertions(+), 22 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 5b178ad055..aad3a787de 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -8,7 +8,7 @@ jobs: runs-on: macos-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: cmake run: cmake -S . 
-B build -D CMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On - name: build diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 10c6fbf26b..e11d5cd646 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -6,7 +6,7 @@ jobs: ci_test_clang_cxx20: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: install_clang run: | sudo apt update @@ -20,7 +20,7 @@ jobs: ci_clang_analyze: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: install_ninja run: | sudo apt update @@ -41,7 +41,7 @@ jobs: ci_test_clang: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: install_ninja run: | sudo apt update @@ -92,7 +92,7 @@ jobs: ci_cpplint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: cmake run: cmake -S . -B build -DJSON_CI=On - name: build @@ -100,20 +100,9 @@ jobs: ci_clang_tidy: runs-on: ubuntu-latest + container: nlohmann/json-ci:latest steps: - - uses: actions/checkout@v1 - - name: install_ninja - run: | - sudo apt update - sudo apt install ninja-build - shell: bash - - name: install_clang - run: | - wget https://apt.llvm.org/llvm.sh - chmod +x llvm.sh - sudo ./llvm.sh 11 - sudo apt-get install clang-tools-11 - shell: bash + - uses: actions/checkout@v2 - name: cmake run: cmake -S . -B build -DJSON_CI=On - name: build diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 1778c94182..ae243762fe 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -7,7 +7,7 @@ jobs: runs-on: windows-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: cmake run: cmake -S . -B build -G "Visual Studio 16 2019" -D CMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On - name: build @@ -19,7 +19,7 @@ jobs: runs-on: windows-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: install Clang run: curl -fsSL -o LLVM10.exe https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/LLVM-10.0.0-win64.exe ; 7z x LLVM10.exe -y -o"C:/Program Files/LLVM" - name: cmake @@ -33,7 +33,7 @@ jobs: runs-on: windows-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: cmake run: cmake -S . -B build -G "Visual Studio 16 2019" -A x64 -T ClangCL -DJSON_BuildTests=On - name: build @@ -45,7 +45,7 @@ jobs: runs-on: windows-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: cmake run: cmake -S . 
-B build -G "Visual Studio 16 2019" -A Win32 -T ClangCL -DJSON_BuildTests=On - name: build From 10fc3520d7daec74a05cbd86d75c41aca716257b Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Wed, 27 Jan 2021 22:58:24 +0100 Subject: [PATCH 053/113] :rotating_light: fix warnings --- test/src/unit-udt.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/test/src/unit-udt.cpp b/test/src/unit-udt.cpp index be7827df35..2c2f6a0bc1 100644 --- a/test/src/unit-udt.cpp +++ b/test/src/unit-udt.cpp @@ -68,8 +68,8 @@ struct address struct person { - age m_age; - name m_name; + age m_age{}; + name m_name{}; country m_country{}; person() = default; person(const age& a, name n, const country& c) : m_age(a), m_name(std::move(n)), m_country(c) {} @@ -77,15 +77,15 @@ struct person struct contact { - person m_person; - address m_address; + person m_person{}; + address m_address{}; contact() = default; contact(person p, address a) : m_person(std::move(p)), m_address(std::move(a)) {} }; struct contact_book { - name m_book_name; + name m_book_name{}; std::vector m_contacts; contact_book() = default; contact_book(name n, std::vector c) : m_book_name(std::move(n)), m_contacts(std::move(c)) {} @@ -347,8 +347,8 @@ namespace udt struct legacy_type { std::string number; - legacy_type() = default; - legacy_type(std::string n) : number(std::move(n)) {} + legacy_type() : number() {} + legacy_type(std::string n) : number(std::move(n)) {} }; } // namespace udt @@ -620,7 +620,7 @@ struct small_pod struct non_pod { std::string s; - non_pod() = default; + non_pod() : s() {} non_pod(std::string S) : s(std::move(S)) {} }; From c243cd7abe5c8c427b6344b8743ea8f5a310f0c6 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 11:18:56 +0100 Subject: [PATCH 054/113] :rotating_light: fix warnings --- test/src/unit-udt.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/src/unit-udt.cpp b/test/src/unit-udt.cpp index 2c2f6a0bc1..6c810af76b 100644 --- a/test/src/unit-udt.cpp +++ b/test/src/unit-udt.cpp @@ -86,7 +86,7 @@ struct contact struct contact_book { name m_book_name{}; - std::vector m_contacts; + std::vector m_contacts{}; contact_book() = default; contact_book(name n, std::vector c) : m_book_name(std::move(n)), m_contacts(std::move(c)) {} }; From ea69fe0f0eb71b4e596bf410a2ef6e9438ce2548 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 11:21:20 +0100 Subject: [PATCH 055/113] :wrench: select proper binary --- cmake/ci.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index bf276da992..01ade85626 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -495,7 +495,7 @@ add_custom_target(ci_oclint add_custom_target(ci_clang_tidy COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -GNinja - -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_CXX_CLANG_TIDY=clang-tidy + -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_CXX_CLANG_TIDY=${CLANG_TIDY_TOOL} -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_tidy COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang_tidy From eba434a95a70e558ff97f9b936acc7a99c208df6 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 11:26:12 +0100 Subject: [PATCH 056/113] :rotating_light: fix warnings --- test/src/unit-udt_macro.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/src/unit-udt_macro.cpp b/test/src/unit-udt_macro.cpp index 3343055718..0235981981 
100644 --- a/test/src/unit-udt_macro.cpp +++ b/test/src/unit-udt_macro.cpp @@ -39,7 +39,7 @@ namespace persons class person_with_private_data { private: - std::string name; + std::string name{}; int age = 0; json metadata = nullptr; @@ -62,7 +62,7 @@ class person_with_private_data class person_without_private_data_1 { public: - std::string name; + std::string name{}; int age = 0; json metadata = nullptr; @@ -84,7 +84,7 @@ class person_without_private_data_1 class person_without_private_data_2 { public: - std::string name; + std::string name{}; int age = 0; json metadata = nullptr; From 2d175d94e7783a1fcac4a4f3369a259e3a99696f Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 12:43:19 +0100 Subject: [PATCH 057/113] :rotating_light: fix warnings --- test/src/unit-algorithms.cpp | 4 +- test/src/unit-allocator.cpp | 28 ++--- test/src/unit-cbor.cpp | 26 ++-- test/src/unit-class_lexer.cpp | 6 +- test/src/unit-class_parser.cpp | 203 +++++++++++------------------- test/src/unit-constructor1.cpp | 8 +- test/src/unit-conversions.cpp | 76 +++++------ test/src/unit-deserialization.cpp | 89 +++++++------ test/src/unit-msgpack.cpp | 6 +- test/src/unit-noexcept.cpp | 18 +-- test/src/unit-regression1.cpp | 29 ++--- test/src/unit-regression2.cpp | 10 +- test/src/unit-serialization.cpp | 6 +- test/src/unit-testsuites.cpp | 50 ++++---- test/src/unit-to_chars.cpp | 8 +- test/src/unit-ubjson.cpp | 22 ++-- test/src/unit-udt.cpp | 1 - 17 files changed, 267 insertions(+), 323 deletions(-) diff --git a/test/src/unit-algorithms.cpp b/test/src/unit-algorithms.cpp index c08e858a62..3f8340068b 100644 --- a/test/src/unit-algorithms.cpp +++ b/test/src/unit-algorithms.cpp @@ -43,7 +43,7 @@ TEST_CASE("algorithms") { CHECK(std::all_of(j_array.begin(), j_array.end(), [](const json & value) { - return value.size() > 0; + return !value.empty(); })); CHECK(std::all_of(j_object.begin(), j_object.end(), [](const json & value) { @@ -67,7 +67,7 @@ TEST_CASE("algorithms") { CHECK(std::none_of(j_array.begin(), j_array.end(), [](const json & value) { - return value.size() == 0; + return value.empty(); })); CHECK(std::none_of(j_object.begin(), j_object.end(), [](const json & value) { diff --git a/test/src/unit-allocator.cpp b/test/src/unit-allocator.cpp index ad78b8f9ea..f432ac0ad2 100644 --- a/test/src/unit-allocator.cpp +++ b/test/src/unit-allocator.cpp @@ -40,12 +40,12 @@ template struct bad_allocator : std::allocator { template - void construct(T*, Args&& ...) + void construct(T* /*unused*/, Args&& ... 
/*unused*/) { throw std::bad_alloc(); } }; -} +} // namespace TEST_CASE("bad_alloc") { @@ -85,10 +85,8 @@ struct my_allocator : std::allocator next_construct_fails = false; throw std::bad_alloc(); } - else - { - ::new (reinterpret_cast(p)) T(std::forward(args)...); - } + + ::new (reinterpret_cast(p)) T(std::forward(args)...); } void deallocate(T* p, std::size_t n) @@ -98,10 +96,8 @@ struct my_allocator : std::allocator next_deallocate_fails = false; throw std::bad_alloc(); } - else - { - std::allocator::deallocate(p, n); - } + + std::allocator::deallocate(p, n); } void destroy(T* p) @@ -111,10 +107,8 @@ struct my_allocator : std::allocator next_destroy_fails = false; throw std::bad_alloc(); } - else - { - p->~T(); - } + + p->~T(); } template @@ -133,7 +127,7 @@ void my_allocator_clean_up(T* p) alloc.destroy(p); alloc.deallocate(p, 1); } -} +} // namespace TEST_CASE("controlled bad_alloc") { @@ -239,7 +233,7 @@ namespace template struct allocator_no_forward : std::allocator { - allocator_no_forward() {} + allocator_no_forward() = default; template allocator_no_forward(allocator_no_forward) {} @@ -256,7 +250,7 @@ struct allocator_no_forward : std::allocator ::new (static_cast(p)) T(args...); } }; -} +} // namespace TEST_CASE("bad my_allocator::construct") { diff --git a/test/src/unit-cbor.cpp b/test/src/unit-cbor.cpp index 9ed80c8f1a..6a8e709c18 100644 --- a/test/src/unit-cbor.cpp +++ b/test/src/unit-cbor.cpp @@ -219,7 +219,7 @@ TEST_CASE("CBOR") // create expected byte vector std::vector expected; expected.push_back(static_cast(0x3b)); - uint64_t positive = static_cast(-1 - i); + auto positive = static_cast(-1 - i); expected.push_back(static_cast((positive >> 56) & 0xff)); expected.push_back(static_cast((positive >> 48) & 0xff)); expected.push_back(static_cast((positive >> 40) & 0xff)); @@ -276,7 +276,7 @@ TEST_CASE("CBOR") // create expected byte vector std::vector expected; expected.push_back(static_cast(0x3a)); - uint32_t positive = static_cast(static_cast(-1 - i) & 0x00000000ffffffff); + auto positive = static_cast(static_cast(-1 - i) & 0x00000000ffffffff); expected.push_back(static_cast((positive >> 24) & 0xff)); expected.push_back(static_cast((positive >> 16) & 0xff)); expected.push_back(static_cast((positive >> 8) & 0xff)); @@ -317,7 +317,7 @@ TEST_CASE("CBOR") // create expected byte vector std::vector expected; expected.push_back(static_cast(0x39)); - uint16_t positive = static_cast(-1 - i); + auto positive = static_cast(-1 - i); expected.push_back(static_cast((positive >> 8) & 0xff)); expected.push_back(static_cast(positive & 0xff)); @@ -328,7 +328,7 @@ TEST_CASE("CBOR") // check individual bytes CHECK(result[0] == 0x39); - uint16_t restored = static_cast(static_cast(result[1]) * 256 + static_cast(result[2])); + auto restored = static_cast(static_cast(result[1]) * 256 + static_cast(result[2])); CHECK(restored == positive); CHECK(-1 - restored == i); @@ -346,7 +346,7 @@ TEST_CASE("CBOR") const auto result = json::to_cbor(j); CHECK(result == expected); - int16_t restored = static_cast(-1 - ((result[1] << 8) + result[2])); + auto restored = static_cast(-1 - ((result[1] << 8) + result[2])); CHECK(restored == -9263); // roundtrip @@ -506,7 +506,7 @@ TEST_CASE("CBOR") // check individual bytes CHECK(result[0] == 0x19); - uint16_t restored = static_cast(static_cast(result[1]) * 256 + static_cast(result[2])); + auto restored = static_cast(static_cast(result[1]) * 256 + static_cast(result[2])); CHECK(restored == i); // roundtrip @@ -634,7 +634,7 @@ TEST_CASE("CBOR") // check individual 
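The my_allocator helper patched above makes construct, deallocate, and destroy throw on demand so the tests can walk the container's out-of-memory error paths. A compilable sketch of the same fault-injection idea, reduced to allocate only (failing_allocator and next_allocate_fails are invented names, not the test's code):

    #include <cstddef>
    #include <memory>
    #include <new>
    #include <vector>

    static bool next_allocate_fails = false;

    template <class T>
    struct failing_allocator : std::allocator<T>
    {
        T* allocate(std::size_t n)
        {
            if (next_allocate_fails)
            {
                next_allocate_fails = false; // one-shot failure
                throw std::bad_alloc();
            }
            return std::allocator<T>::allocate(n);
        }
    };

    int main()
    {
        std::vector<int, failing_allocator<int>> v;
        v.push_back(1);          // ordinary allocation succeeds
        next_allocate_fails = true;
        try
        {
            v.reserve(1024);     // the next allocation throws
            return 1;            // should not be reached
        }
        catch (const std::bad_alloc&)
        {
            return 0;            // error path taken as intended
        }
    }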
bytes CHECK(result[0] == 0xd1); - int16_t restored = static_cast<int16_t>((result[1] << 8) + result[2]); + auto restored = static_cast<int16_t>((result[1] << 8) + result[2]); CHECK(restored == i); // roundtrip @@ -699,7 +699,7 @@ TEST_CASE("CBOR") // check individual bytes CHECK(result[0] == 0x18); - uint8_t restored = static_cast<uint8_t>(result[1]); + auto restored = static_cast<uint8_t>(result[1]); CHECK(restored == i); // roundtrip @@ -733,7 +733,7 @@ TEST_CASE("CBOR") // check individual bytes CHECK(result[0] == 0x19); - uint16_t restored = static_cast<uint16_t>(static_cast<uint8_t>(result[1]) * 256 + static_cast<uint8_t>(result[2])); + auto restored = static_cast<uint16_t>(static_cast<uint8_t>(result[1]) * 256 + static_cast<uint8_t>(result[2])); CHECK(restored == i); // roundtrip @@ -940,7 +940,7 @@ TEST_CASE("CBOR") } SECTION("-3.40282e+38(lowest float)") { - double v = static_cast<double>(std::numeric_limits<float>::lowest()); + auto v = static_cast<double>(std::numeric_limits<float>::lowest()); json j = v; std::vector<uint8_t> expected = { @@ -1340,7 +1340,7 @@ TEST_CASE("CBOR") SECTION("{\"a\": {\"b\": {\"c\": {}}}}") { - json j = json::parse("{\"a\": {\"b\": {\"c\": {}}}}"); + json j = json::parse(R"({"a": {"b": {"c": {}}}})"); std::vector<uint8_t> expected = { 0xa1, 0x61, 0x61, 0xa1, 0x61, 0x62, 0xa1, 0x61, 0x63, 0xa0 }; @@ -2249,7 +2249,7 @@ TEST_CASE("CBOR roundtrips" * doctest::skip()) // parse CBOR file auto packed = utils::read_binary_file(filename + ".cbor"); - if (!exclude_packed.count(filename)) + if (exclude_packed.count(filename) == 0u) { { INFO_WITH_TEMP(filename + ": output adapters: std::vector<uint8_t>"); @@ -2323,7 +2323,7 @@ TEST_CASE("all CBOR first bytes") // check that parse_error.112 is only thrown if the // first byte is in the unsupported set INFO_WITH_TEMP(e.what()); - if (std::find(unsupported.begin(), unsupported.end(), byte) != unsupported.end()) + if (unsupported.find(byte) != unsupported.end()) { CHECK(e.id == 112); } diff --git a/test/src/unit-class_lexer.cpp b/test/src/unit-class_lexer.cpp index 07d243a818..ec9ce073ec 100644 --- a/test/src/unit-class_lexer.cpp +++ b/test/src/unit-class_lexer.cpp @@ -36,15 +36,15 @@ using nlohmann::json; namespace { // shortcut to scan a string literal -json::lexer::token_type scan_string(const char* s, const bool ignore_comments = false); +json::lexer::token_type scan_string(const char* s, bool ignore_comments = false); json::lexer::token_type scan_string(const char* s, const bool ignore_comments) { auto ia = nlohmann::detail::input_adapter(s); return nlohmann::detail::lexer<json, decltype(ia)>(std::move(ia), ignore_comments).scan(); } -} +} // namespace -std::string get_error_message(const char* s, const bool ignore_comments = false); +std::string get_error_message(const char* s, bool ignore_comments = false); std::string get_error_message(const char* s, const bool ignore_comments) { auto ia = nlohmann::detail::input_adapter(s); diff --git a/test/src/unit-class_parser.cpp b/test/src/unit-class_parser.cpp index 2df07d6d44..c8255a4bf7 100644 --- a/test/src/unit-class_parser.cpp +++ b/test/src/unit-class_parser.cpp @@ -42,13 +42,13 @@ class SaxEventLogger public: bool null() { - events.push_back("null()"); + events.emplace_back("null()"); return true; } bool boolean(bool val) { - events.push_back(val ? "boolean(true)" : "boolean(false)"); + events.emplace_back(val ?
"boolean(true)" : "boolean(false)"); return true; } @@ -64,7 +64,7 @@ class SaxEventLogger return true; } - bool number_float(json::number_float_t, const std::string& s) + bool number_float(json::number_float_t /*unused*/, const std::string& s) { events.push_back("number_float(" + s + ")"); return true; @@ -79,7 +79,7 @@ class SaxEventLogger bool binary(json::binary_t& val) { std::string binary_contents = "binary("; - std::string comma_space = ""; + std::string comma_space; for (auto b : val) { binary_contents.append(comma_space); @@ -95,7 +95,7 @@ class SaxEventLogger { if (elements == std::size_t(-1)) { - events.push_back("start_object()"); + events.emplace_back("start_object()"); } else { @@ -112,7 +112,7 @@ class SaxEventLogger bool end_object() { - events.push_back("end_object()"); + events.emplace_back("end_object()"); return true; } @@ -120,7 +120,7 @@ class SaxEventLogger { if (elements == std::size_t(-1)) { - events.push_back("start_array()"); + events.emplace_back("start_array()"); } else { @@ -131,11 +131,11 @@ class SaxEventLogger bool end_array() { - events.push_back("end_array()"); + events.emplace_back("end_array()"); return true; } - bool parse_error(std::size_t position, const std::string&, const json::exception&) + bool parse_error(std::size_t position, const std::string& /*unused*/, const json::exception& /*unused*/) { errored = true; events.push_back("parse_error(" + std::to_string(position) + ")"); @@ -157,42 +157,42 @@ class SaxCountdown : public nlohmann::json::json_sax_t return events_left-- > 0; } - bool boolean(bool) override + bool boolean(bool /*val*/) override { return events_left-- > 0; } - bool number_integer(json::number_integer_t) override + bool number_integer(json::number_integer_t /*val*/) override { return events_left-- > 0; } - bool number_unsigned(json::number_unsigned_t) override + bool number_unsigned(json::number_unsigned_t /*val*/) override { return events_left-- > 0; } - bool number_float(json::number_float_t, const std::string&) override + bool number_float(json::number_float_t /*val*/, const std::string& /*s*/) override { return events_left-- > 0; } - bool string(std::string&) override + bool string(std::string& /*val*/) override { return events_left-- > 0; } - bool binary(json::binary_t&) override + bool binary(json::binary_t& /*val*/) override { return events_left-- > 0; } - bool start_object(std::size_t) override + bool start_object(std::size_t /*elements*/) override { return events_left-- > 0; } - bool key(std::string&) override + bool key(std::string& /*val*/) override { return events_left-- > 0; } @@ -202,7 +202,7 @@ class SaxCountdown : public nlohmann::json::json_sax_t return events_left-- > 0; } - bool start_array(std::size_t) override + bool start_array(std::size_t /*elements*/) override { return events_left-- > 0; } @@ -212,7 +212,7 @@ class SaxCountdown : public nlohmann::json::json_sax_t return events_left-- > 0; } - bool parse_error(std::size_t, const std::string&, const json::exception&) override + bool parse_error(std::size_t /*position*/, const std::string& /*last_token*/, const json::exception& /*ex*/) override { return false; } @@ -267,7 +267,7 @@ bool accept_helper(const std::string& s) CHECK(json::parser(nlohmann::detail::input_adapter(s)).accept(false) == !el.errored); // 5. 
parse with simple callback - json::parser_callback_t cb = [](int, json::parse_event_t, json&) + json::parser_callback_t cb = [](int /*unused*/, json::parse_event_t /*unused*/, json& /*unused*/) { return true; }; @@ -641,8 +641,8 @@ TEST_CASE("parser class") SECTION("overflow") { // overflows during parsing yield an exception - CHECK_THROWS_AS(parser_helper("1.18973e+4932") == json(), json::out_of_range&); - CHECK_THROWS_WITH(parser_helper("1.18973e+4932") == json(), + CHECK_THROWS_AS(parser_helper("1.18973e+4932").empty(), json::out_of_range&); + CHECK_THROWS_WITH(parser_helper("1.18973e+4932").empty(), "[json.exception.out_of_range.406] number overflow parsing '1.18973e+4932'"); } @@ -1155,7 +1155,7 @@ TEST_CASE("parser class") case ('r'): case ('t'): { - CHECK_NOTHROW(parser_helper(s.c_str())); + CHECK_NOTHROW(parser_helper(s)); break; } @@ -1168,11 +1168,11 @@ TEST_CASE("parser class") // any other combination of backslash and character is invalid default: { - CHECK_THROWS_AS(parser_helper(s.c_str()), json::parse_error&); + CHECK_THROWS_AS(parser_helper(s), json::parse_error&); // only check error message if c is not a control character if (c > 0x1f) { - CHECK_THROWS_WITH_STD_STR(parser_helper(s.c_str()), + CHECK_THROWS_WITH_STD_STR(parser_helper(s), "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid string: forbidden character after backslash; last read: '\"\\" + std::string(1, static_cast(c)) + "'"); } break; @@ -1233,49 +1233,49 @@ TEST_CASE("parser class") if (valid(c)) { CAPTURE(s1) - CHECK_NOTHROW(parser_helper(s1.c_str())); + CHECK_NOTHROW(parser_helper(s1)); CAPTURE(s2) - CHECK_NOTHROW(parser_helper(s2.c_str())); + CHECK_NOTHROW(parser_helper(s2)); CAPTURE(s3) - CHECK_NOTHROW(parser_helper(s3.c_str())); + CHECK_NOTHROW(parser_helper(s3)); CAPTURE(s4) - CHECK_NOTHROW(parser_helper(s4.c_str())); + CHECK_NOTHROW(parser_helper(s4)); } else { CAPTURE(s1) - CHECK_THROWS_AS(parser_helper(s1.c_str()), json::parse_error&); + CHECK_THROWS_AS(parser_helper(s1), json::parse_error&); // only check error message if c is not a control character if (c > 0x1f) { - CHECK_THROWS_WITH_STD_STR(parser_helper(s1.c_str()), + CHECK_THROWS_WITH_STD_STR(parser_helper(s1), "[json.exception.parse_error.101] parse error at line 1, column 7: syntax error while parsing value - invalid string: '\\u' must be followed by 4 hex digits; last read: '" + s1.substr(0, 7) + "'"); } CAPTURE(s2) - CHECK_THROWS_AS(parser_helper(s2.c_str()), json::parse_error&); + CHECK_THROWS_AS(parser_helper(s2), json::parse_error&); // only check error message if c is not a control character if (c > 0x1f) { - CHECK_THROWS_WITH_STD_STR(parser_helper(s2.c_str()), + CHECK_THROWS_WITH_STD_STR(parser_helper(s2), "[json.exception.parse_error.101] parse error at line 1, column 6: syntax error while parsing value - invalid string: '\\u' must be followed by 4 hex digits; last read: '" + s2.substr(0, 6) + "'"); } CAPTURE(s3) - CHECK_THROWS_AS(parser_helper(s3.c_str()), json::parse_error&); + CHECK_THROWS_AS(parser_helper(s3), json::parse_error&); // only check error message if c is not a control character if (c > 0x1f) { - CHECK_THROWS_WITH_STD_STR(parser_helper(s3.c_str()), + CHECK_THROWS_WITH_STD_STR(parser_helper(s3), "[json.exception.parse_error.101] parse error at line 1, column 5: syntax error while parsing value - invalid string: '\\u' must be followed by 4 hex digits; last read: '" + s3.substr(0, 5) + "'"); } CAPTURE(s4) - CHECK_THROWS_AS(parser_helper(s4.c_str()), 
json::parse_error&); + CHECK_THROWS_AS(parser_helper(s4), json::parse_error&); // only check error message if c is not a control character if (c > 0x1f) { - CHECK_THROWS_WITH_STD_STR(parser_helper(s4.c_str()), + CHECK_THROWS_WITH_STD_STR(parser_helper(s4), "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid string: '\\u' must be followed by 4 hex digits; last read: '" + s4.substr(0, 4) + "'"); } } @@ -1381,7 +1381,7 @@ TEST_CASE("parser class") case ('r'): case ('t'): { - CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s.c_str()))).accept()); + CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s))).accept()); break; } @@ -1394,7 +1394,7 @@ TEST_CASE("parser class") // any other combination of backslash and character is invalid default: { - CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s.c_str()))).accept() == false); + CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s))).accept() == false); break; } } @@ -1453,27 +1453,27 @@ TEST_CASE("parser class") if (valid(c)) { CAPTURE(s1) - CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s1.c_str()))).accept()); + CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s1))).accept()); CAPTURE(s2) - CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s2.c_str()))).accept()); + CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s2))).accept()); CAPTURE(s3) - CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s3.c_str()))).accept()); + CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s3))).accept()); CAPTURE(s4) - CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s4.c_str()))).accept()); + CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s4))).accept()); } else { CAPTURE(s1) - CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s1.c_str()))).accept() == false); + CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s1))).accept() == false); CAPTURE(s2) - CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s2.c_str()))).accept() == false); + CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s2))).accept() == false); CAPTURE(s3) - CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s3.c_str()))).accept() == false); + CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s3))).accept() == false); CAPTURE(s4) - CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s4.c_str()))).accept() == false); + CHECK(json::parser(nlohmann::detail::input_adapter(std::string(s4))).accept() == false); } } } @@ -1499,16 +1499,9 @@ TEST_CASE("parser class") // test case to make sure the callback is properly evaluated after reading a key { - json::parser_callback_t cb = [](int, json::parse_event_t event, json&) + json::parser_callback_t cb = [](int /*unused*/, json::parse_event_t event, json& /*unused*/) { - if (event == json::parse_event_t::key) - { - return false; - } - else - { - return true; - } + return event != json::parse_event_t::key; }; json x = json::parse("{\"key\": false}", cb); @@ -1518,7 +1511,7 @@ TEST_CASE("parser class") SECTION("callback function") { - auto s_object = R"( + const auto* s_object = R"( { "foo": 2, "bar": { @@ -1527,11 +1520,11 @@ TEST_CASE("parser class") } )"; - auto s_array = R"( + const auto* s_array = R"( [1,2,[3,4,5],4,5] )"; - auto structured_array = R"( + const auto* structured_array = R"( [ 1, { @@ -1545,14 +1538,14 @@ TEST_CASE("parser class") 
SECTION("filter nothing") { - json j_object = json::parse(s_object, [](int, json::parse_event_t, const json&) + json j_object = json::parse(s_object, [](int /*unused*/, json::parse_event_t /*unused*/, const json& /*unused*/) { return true; }); CHECK (j_object == json({{"foo", 2}, {"bar", {{"baz", 1}}}})); - json j_array = json::parse(s_array, [](int, json::parse_event_t, const json&) + json j_array = json::parse(s_array, [](int /*unused*/, json::parse_event_t /*unused*/, const json& /*unused*/) { return true; }); @@ -1562,7 +1555,7 @@ TEST_CASE("parser class") SECTION("filter everything") { - json j_object = json::parse(s_object, [](int, json::parse_event_t, const json&) + json j_object = json::parse(s_object, [](int /*unused*/, json::parse_event_t /*unused*/, const json& /*unused*/) { return false; }); @@ -1570,7 +1563,7 @@ TEST_CASE("parser class") // the top-level object will be discarded, leaving a null CHECK (j_object.is_null()); - json j_array = json::parse(s_array, [](int, json::parse_event_t, const json&) + json j_array = json::parse(s_array, [](int /*unused*/, json::parse_event_t /*unused*/, const json& /*unused*/) { return false; }); @@ -1581,31 +1574,17 @@ TEST_CASE("parser class") SECTION("filter specific element") { - json j_object = json::parse(s_object, [](int, json::parse_event_t, const json & j) + json j_object = json::parse(s_object, [](int /*unused*/, json::parse_event_t /*unused*/, const json & j) { // filter all number(2) elements - if (j == json(2)) - { - return false; - } - else - { - return true; - } + return j != json(2); }); CHECK (j_object == json({{"bar", {{"baz", 1}}}})); - json j_array = json::parse(s_array, [](int, json::parse_event_t, const json & j) + json j_array = json::parse(s_array, [](int /*unused*/, json::parse_event_t /*unused*/, const json & j) { - if (j == json(2)) - { - return false; - } - else - { - return true; - } + return j != json(2); }); CHECK (j_array == json({1, {3, 4, 5}, 4, 5})); @@ -1613,32 +1592,18 @@ TEST_CASE("parser class") SECTION("filter object in array") { - json j_filtered1 = json::parse(structured_array, [](int, json::parse_event_t e, const json & parsed) + json j_filtered1 = json::parse(structured_array, [](int /*unused*/, json::parse_event_t e, const json & parsed) { - if (e == json::parse_event_t::object_end && parsed.contains("foo")) - { - return false; - } - else - { - return true; - } + return !(e == json::parse_event_t::object_end && parsed.contains("foo")); }); // the specified object will be discarded, and removed. CHECK (j_filtered1.size() == 2); CHECK (j_filtered1 == json({1, {{"qux", "baz"}}})); - json j_filtered2 = json::parse(structured_array, [](int, json::parse_event_t e, const json& /*parsed*/) + json j_filtered2 = json::parse(structured_array, [](int /*unused*/, json::parse_event_t e, const json& /*parsed*/) { - if (e == json::parse_event_t::object_end) - { - return false; - } - else - { - return true; - } + return e != json::parse_event_t::object_end; }); // removed all objects in array. 
@@ -1651,7 +1616,7 @@ TEST_CASE("parser class") SECTION("first closing event") { { - json j_object = json::parse(s_object, [](int, json::parse_event_t e, const json&) + json j_object = json::parse(s_object, [](int /*unused*/, json::parse_event_t e, const json& /*unused*/) { static bool first = true; if (e == json::parse_event_t::object_end && first) @@ -1659,10 +1624,8 @@ TEST_CASE("parser class") first = false; return false; } - else - { - return true; - } + + return true; }); // the first completed object will be discarded @@ -1670,7 +1633,7 @@ TEST_CASE("parser class") } { - json j_array = json::parse(s_array, [](int, json::parse_event_t e, const json&) + json j_array = json::parse(s_array, [](int /*unused*/, json::parse_event_t e, const json& /*unused*/) { static bool first = true; if (e == json::parse_event_t::array_end && first) @@ -1678,10 +1641,8 @@ TEST_CASE("parser class") first = false; return false; } - else - { - return true; - } + + return true; }); // the first completed array will be discarded @@ -1696,29 +1657,15 @@ TEST_CASE("parser class") // object and array is discarded only after the closing character // has been read - json j_empty_object = json::parse("{}", [](int, json::parse_event_t e, const json&) + json j_empty_object = json::parse("{}", [](int /*unused*/, json::parse_event_t e, const json& /*unused*/) { - if (e == json::parse_event_t::object_end) - { - return false; - } - else - { - return true; - } + return e != json::parse_event_t::object_end; }); CHECK(j_empty_object == json()); - json j_empty_array = json::parse("[]", [](int, json::parse_event_t e, const json&) + json j_empty_array = json::parse("[]", [](int /*unused*/, json::parse_event_t e, const json& /*unused*/) { - if (e == json::parse_event_t::array_end) - { - return false; - } - else - { - return true; - } + return e != json::parse_event_t::array_end; }); CHECK(j_empty_array == json()); } @@ -1784,7 +1731,7 @@ TEST_CASE("parser class") { SECTION("parser with callback") { - json::parser_callback_t cb = [](int, json::parse_event_t, json&) + json::parser_callback_t cb = [](int /*unused*/, json::parse_event_t /*unused*/, json& /*unused*/) { return true; }; diff --git a/test/src/unit-constructor1.cpp b/test/src/unit-constructor1.cpp index 70b3e40470..6838dd0e52 100644 --- a/test/src/unit-constructor1.cpp +++ b/test/src/unit-constructor1.cpp @@ -794,7 +794,7 @@ TEST_CASE("constructors") SECTION("integer literal with l suffix") { - json j(42l); + json j(42L); CHECK(j.type() == json::value_t::number_integer); CHECK(j == j_reference); } @@ -808,7 +808,7 @@ TEST_CASE("constructors") SECTION("integer literal with ll suffix") { - json j(42ll); + json j(42LL); CHECK(j.type() == json::value_t::number_integer); CHECK(j == j_reference); } @@ -892,7 +892,7 @@ TEST_CASE("constructors") SECTION("long double") { - long double n = 42.23l; + long double n = 42.23L; json j(n); CHECK(j.type() == json::value_t::number_float); CHECK(j.m_value.number_float == Approx(j_reference.m_value.number_float)); @@ -914,7 +914,7 @@ TEST_CASE("constructors") SECTION("integer literal with l suffix") { - json j(42.23l); + json j(42.23L); CHECK(j.type() == json::value_t::number_float); CHECK(j.m_value.number_float == Approx(j_reference.m_value.number_float)); } diff --git a/test/src/unit-conversions.cpp b/test/src/unit-conversions.cpp index 7f59c63ecf..cc6c7d0787 100644 --- a/test/src/unit-conversions.cpp +++ b/test/src/unit-conversions.cpp @@ -633,7 +633,7 @@ TEST_CASE("value conversion") SECTION("boolean_t") { - json::boolean_t b = 
j.get<json::boolean_t>(); + auto b = j.get<json::boolean_t>(); CHECK(json(b) == j); } @@ -726,25 +726,25 @@ SECTION("number_integer_t") { - json::number_integer_t n = j.get<json::number_integer_t>(); + auto n = j.get<json::number_integer_t>(); CHECK(json(n) == j); } SECTION("number_unsigned_t") { - json::number_unsigned_t n = j_unsigned.get<json::number_unsigned_t>(); + auto n = j_unsigned.get<json::number_unsigned_t>(); CHECK(json(n) == j_unsigned); } SECTION("short") { - short n = j.get<short>(); + auto n = j.get<short>(); CHECK(json(n) == j); } SECTION("unsigned short") { - unsigned short n = j.get<unsigned short>(); + auto n = j.get<unsigned short>(); CHECK(json(n) == j); } @@ -756,7 +756,7 @@ SECTION("unsigned int") { - unsigned int n = j.get<unsigned int>(); + auto n = j.get<unsigned int>(); CHECK(json(n) == j); } @@ -768,163 +768,163 @@ SECTION("unsigned long") { - unsigned long n = j.get<unsigned long>(); + auto n = j.get<unsigned long>(); CHECK(json(n) == j); } SECTION("long long") { - long long n = j.get<long long>(); + auto n = j.get<long long>(); CHECK(json(n) == j); } SECTION("unsigned long long") { - unsigned long long n = j.get<unsigned long long>(); + auto n = j.get<unsigned long long>(); CHECK(json(n) == j); } SECTION("int8_t") { - int8_t n = j.get<int8_t>(); + auto n = j.get<int8_t>(); CHECK(json(n) == j); } SECTION("int16_t") { - int16_t n = j.get<int16_t>(); + auto n = j.get<int16_t>(); CHECK(json(n) == j); } SECTION("int32_t") { - int32_t n = j.get<int32_t>(); + auto n = j.get<int32_t>(); CHECK(json(n) == j); } SECTION("int64_t") { - int64_t n = j.get<int64_t>(); + auto n = j.get<int64_t>(); CHECK(json(n) == j); } SECTION("int8_fast_t") { - int_fast8_t n = j.get<int_fast8_t>(); + auto n = j.get<int_fast8_t>(); CHECK(json(n) == j); } SECTION("int16_fast_t") { - int_fast16_t n = j.get<int_fast16_t>(); + auto n = j.get<int_fast16_t>(); CHECK(json(n) == j); } SECTION("int32_fast_t") { - int_fast32_t n = j.get<int_fast32_t>(); + auto n = j.get<int_fast32_t>(); CHECK(json(n) == j); } SECTION("int64_fast_t") { - int_fast64_t n = j.get<int_fast64_t>(); + auto n = j.get<int_fast64_t>(); CHECK(json(n) == j); } SECTION("int8_least_t") { - int_least8_t n = j.get<int_least8_t>(); + auto n = j.get<int_least8_t>(); CHECK(json(n) == j); } SECTION("int16_least_t") { - int_least16_t n = j.get<int_least16_t>(); + auto n = j.get<int_least16_t>(); CHECK(json(n) == j); } SECTION("int32_least_t") { - int_least32_t n = j.get<int_least32_t>(); + auto n = j.get<int_least32_t>(); CHECK(json(n) == j); } SECTION("int64_least_t") { - int_least64_t n = j.get<int_least64_t>(); + auto n = j.get<int_least64_t>(); CHECK(json(n) == j); } SECTION("uint8_t") { - uint8_t n = j.get<uint8_t>(); + auto n = j.get<uint8_t>(); CHECK(json(n) == j); } SECTION("uint16_t") { - uint16_t n = j.get<uint16_t>(); + auto n = j.get<uint16_t>(); CHECK(json(n) == j); } SECTION("uint32_t") { - uint32_t n = j.get<uint32_t>(); + auto n = j.get<uint32_t>(); CHECK(json(n) == j); } SECTION("uint64_t") { - uint64_t n = j.get<uint64_t>(); + auto n = j.get<uint64_t>(); CHECK(json(n) == j); } SECTION("uint8_fast_t") { - uint_fast8_t n = j.get<uint_fast8_t>(); + auto n = j.get<uint_fast8_t>(); CHECK(json(n) == j); } SECTION("uint16_fast_t") { - uint_fast16_t n = j.get<uint_fast16_t>(); + auto n = j.get<uint_fast16_t>(); CHECK(json(n) == j); } SECTION("uint32_fast_t") { - uint_fast32_t n = j.get<uint_fast32_t>(); + auto n = j.get<uint_fast32_t>(); CHECK(json(n) == j); } SECTION("uint64_fast_t") { - uint_fast64_t n = j.get<uint_fast64_t>(); + auto n = j.get<uint_fast64_t>(); CHECK(json(n) == j); } SECTION("uint8_least_t") { - uint_least8_t n = j.get<uint_least8_t>(); + auto n = j.get<uint_least8_t>(); CHECK(json(n) == j); } SECTION("uint16_least_t") { - uint_least16_t n = j.get<uint_least16_t>(); + auto n = j.get<uint_least16_t>(); CHECK(json(n) == j); } SECTION("uint32_least_t") { - uint_least32_t n = j.get<uint_least32_t>(); + auto n = j.get<uint_least32_t>(); CHECK(json(n) == j); } SECTION("uint64_least_t") { - uint_least64_t n = j.get<uint_least64_t>(); + auto n = j.get<uint_least64_t>(); CHECK(json(n) == j); } @@ -976,13 +976,13 @@ SECTION("number_integer_t") { - json::number_integer_t n = j.get<json::number_integer_t>(); + auto n = j.get<json::number_integer_t>(); CHECK(json(n) == j); } SECTION("number_unsigned_t") { - json::number_unsigned_t n =
j_unsigned.get<json::number_unsigned_t>(); + auto n = j_unsigned.get<json::number_unsigned_t>(); CHECK(json(n) == j_unsigned); } @@ -1187,19 +1187,19 @@ SECTION("number_float_t") { - json::number_float_t n = j.get<json::number_float_t>(); + auto n = j.get<json::number_float_t>(); CHECK(json(n).m_value.number_float == Approx(j.m_value.number_float)); } SECTION("float") { - float n = j.get<float>(); + auto n = j.get<float>(); CHECK(json(n).m_value.number_float == Approx(j.m_value.number_float)); } SECTION("double") { - double n = j.get<double>(); + auto n = j.get<double>(); CHECK(json(n).m_value.number_float == Approx(j.m_value.number_float)); } diff --git a/test/src/unit-deserialization.cpp b/test/src/unit-deserialization.cpp index d2db7e80c3..6b7023a9ee 100644 --- a/test/src/unit-deserialization.cpp +++ b/test/src/unit-deserialization.cpp @@ -42,13 +42,13 @@ struct SaxEventLogger : public nlohmann::json_sax<json> { bool null() override { - events.push_back("null()"); + events.emplace_back("null()"); return true; } bool boolean(bool val) override { - events.push_back(val ? "boolean(true)" : "boolean(false)"); + events.emplace_back(val ? "boolean(true)" : "boolean(false)"); return true; } @@ -64,7 +64,7 @@ struct SaxEventLogger : public nlohmann::json_sax<json> return true; } - bool number_float(json::number_float_t, const std::string& s) override + bool number_float(json::number_float_t /*val*/, const std::string& s) override { events.push_back("number_float(" + s + ")"); return true; @@ -79,7 +79,7 @@ struct SaxEventLogger : public nlohmann::json_sax<json> bool binary(json::binary_t& val) override { std::string binary_contents = "binary("; - std::string comma_space = ""; + std::string comma_space; for (auto b : val) { binary_contents.append(comma_space); @@ -95,7 +95,7 @@ struct SaxEventLogger : public nlohmann::json_sax<json> { if (elements == std::size_t(-1)) { - events.push_back("start_object()"); + events.emplace_back("start_object()"); } else { @@ -112,7 +112,7 @@ struct SaxEventLogger : public nlohmann::json_sax<json> bool end_object() override { - events.push_back("end_object()"); + events.emplace_back("end_object()"); return true; } @@ -120,7 +120,7 @@ struct SaxEventLogger : public nlohmann::json_sax<json> { if (elements == std::size_t(-1)) { - events.push_back("start_array()"); + events.emplace_back("start_array()"); } else { @@ -131,11 +131,11 @@ struct SaxEventLogger : public nlohmann::json_sax<json> bool end_array() override { - events.push_back("end_array()"); + events.emplace_back("end_array()"); return true; } - bool parse_error(std::size_t position, const std::string&, const json::exception&) override + bool parse_error(std::size_t position, const std::string& /*last_token*/, const json::exception& /*ex*/) override { events.push_back("parse_error(" + std::to_string(position) + ")"); return false; @@ -150,7 +150,7 @@ struct SaxEventLoggerExitAfterStartObject : public SaxEventLogger { if (elements == std::size_t(-1)) { - events.push_back("start_object()"); + events.emplace_back("start_object()"); } else { @@ -175,7 +175,7 @@ struct SaxEventLoggerExitAfterStartArray : public SaxEventLogger { if (elements == std::size_t(-1)) { - events.push_back("start_array()"); + events.emplace_back("start_array()"); } else { @@ -184,7 +184,7 @@ struct SaxEventLoggerExitAfterStartArray : public SaxEventLogger return false; } }; -} +} // namespace TEST_CASE("deserialization") { @@ -192,10 +192,12 @@ TEST_CASE("deserialization") { SECTION("stream") { -
std::stringstream ss1; + std::stringstream ss2; + std::stringstream ss3; + ss1 << R"(["foo",1,2,3,false,{"one":1}])"; + ss2 << R"(["foo",1,2,3,false,{"one":1}])"; + ss3 << R"(["foo",1,2,3,false,{"one":1}])"; json j = json::parse(ss1); CHECK(json::accept(ss2)); CHECK(j == json({"foo", 1, 2, 3, false, {{"one", 1}}})); @@ -214,7 +216,7 @@ TEST_CASE("deserialization") SECTION("string literal") { - auto s = "[\"foo\",1,2,3,false,{\"one\":1}]"; + const auto* s = R"(["foo",1,2,3,false,{"one":1}])"; json j = json::parse(s); CHECK(json::accept(s)); CHECK(j == json({"foo", 1, 2, 3, false, {{"one", 1}}})); @@ -233,7 +235,7 @@ TEST_CASE("deserialization") SECTION("string_t") { - json::string_t s = "[\"foo\",1,2,3,false,{\"one\":1}]"; + json::string_t s = R"(["foo",1,2,3,false,{"one":1}])"; json j = json::parse(s); CHECK(json::accept(s)); CHECK(j == json({"foo", 1, 2, 3, false, {{"one", 1}}})); @@ -253,7 +255,7 @@ TEST_CASE("deserialization") SECTION("operator<<") { std::stringstream ss; - ss << "[\"foo\",1,2,3,false,{\"one\":1}]"; + ss << R"(["foo",1,2,3,false,{"one":1}])"; json j; j << ss; CHECK(j == json({"foo", 1, 2, 3, false, {{"one", 1}}})); @@ -262,7 +264,7 @@ TEST_CASE("deserialization") SECTION("operator>>") { std::stringstream ss; - ss << "[\"foo\",1,2,3,false,{\"one\":1}]"; + ss << R"(["foo",1,2,3,false,{"one":1}])"; json j; ss >> j; CHECK(j == json({"foo", 1, 2, 3, false, {{"one", 1}}})); @@ -278,12 +280,16 @@ TEST_CASE("deserialization") { SECTION("stream") { - std::stringstream ss1, ss2, ss3, ss4, ss5; - ss1 << "[\"foo\",1,2,3,false,{\"one\":1}"; - ss2 << "[\"foo\",1,2,3,false,{\"one\":1}"; - ss3 << "[\"foo\",1,2,3,false,{\"one\":1}"; - ss4 << "[\"foo\",1,2,3,false,{\"one\":1}"; - ss5 << "[\"foo\",1,2,3,false,{\"one\":1}"; + std::stringstream ss1; + std::stringstream ss2; + std::stringstream ss3; + std::stringstream ss4; + std::stringstream ss5; + ss1 << R"(["foo",1,2,3,false,{"one":1})"; + ss2 << R"(["foo",1,2,3,false,{"one":1})"; + ss3 << R"(["foo",1,2,3,false,{"one":1})"; + ss4 << R"(["foo",1,2,3,false,{"one":1})"; + ss5 << R"(["foo",1,2,3,false,{"one":1})"; json _; CHECK_THROWS_AS(_ = json::parse(ss1), json::parse_error&); @@ -309,7 +315,7 @@ TEST_CASE("deserialization") SECTION("string") { - json::string_t s = "[\"foo\",1,2,3,false,{\"one\":1}"; + json::string_t s = R"(["foo",1,2,3,false,{"one":1})"; json _; CHECK_THROWS_AS(_ = json::parse(s), json::parse_error&); CHECK_THROWS_WITH(_ = json::parse(s), @@ -334,9 +340,10 @@ TEST_CASE("deserialization") SECTION("operator<<") { - std::stringstream ss1, ss2; - ss1 << "[\"foo\",1,2,3,false,{\"one\":1}"; - ss2 << "[\"foo\",1,2,3,false,{\"one\":1}"; + std::stringstream ss1; + std::stringstream ss2; + ss1 << R"(["foo",1,2,3,false,{"one":1})"; + ss2 << R"(["foo",1,2,3,false,{"one":1})"; json j; CHECK_THROWS_AS(j << ss1, json::parse_error&); CHECK_THROWS_WITH(j << ss2, @@ -345,9 +352,10 @@ TEST_CASE("deserialization") SECTION("operator>>") { - std::stringstream ss1, ss2; - ss1 << "[\"foo\",1,2,3,false,{\"one\":1}"; - ss2 << "[\"foo\",1,2,3,false,{\"one\":1}"; + std::stringstream ss1; + std::stringstream ss2; + ss1 << R"(["foo",1,2,3,false,{"one":1})"; + ss2 << R"(["foo",1,2,3,false,{"one":1})"; json j; CHECK_THROWS_AS(ss1 >> j, json::parse_error&); CHECK_THROWS_WITH(ss2 >> j, @@ -404,7 +412,7 @@ TEST_CASE("deserialization") SECTION("from chars") { - uint8_t* v = new uint8_t[5]; + auto* v = new uint8_t[5]; v[0] = 't'; v[1] = 'r'; v[2] = 'u'; @@ -860,7 +868,8 @@ TEST_CASE("deserialization") CHECK(json::parse(bom + "1") == 1); 
CHECK(json::parse(std::istringstream(bom + "1")) == 1); - SaxEventLogger l1, l2; + SaxEventLogger l1; + SaxEventLogger l2; CHECK(json::sax_parse(std::istringstream(bom + "1"), &l1)); CHECK(json::sax_parse(bom + "1", &l2)); CHECK(l1.events.size() == 1); @@ -886,7 +895,8 @@ TEST_CASE("deserialization") CHECK_THROWS_WITH(_ = json::parse(std::istringstream(bom.substr(0, 2))), "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid BOM; must be 0xEF 0xBB 0xBF if given; last read: '\xEF\xBB'"); - SaxEventLogger l1, l2; + SaxEventLogger l1; + SaxEventLogger l2; CHECK(!json::sax_parse(std::istringstream(bom.substr(0, 2)), &l1)); CHECK(!json::sax_parse(bom.substr(0, 2), &l2)); CHECK(l1.events.size() == 1); @@ -912,7 +922,8 @@ TEST_CASE("deserialization") CHECK_THROWS_WITH(_ = json::parse(std::istringstream(bom.substr(0, 1))), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid BOM; must be 0xEF 0xBB 0xBF if given; last read: '\xEF'"); - SaxEventLogger l1, l2; + SaxEventLogger l1; + SaxEventLogger l2; CHECK(!json::sax_parse(std::istringstream(bom.substr(0, 1)), &l1)); CHECK(!json::sax_parse(bom.substr(0, 1), &l2)); CHECK(l1.events.size() == 1); @@ -942,7 +953,7 @@ TEST_CASE("deserialization") CAPTURE(i1) CAPTURE(i2) - std::string s = ""; + std::string s; s.push_back(static_cast<char>(bom[0] + i0)); s.push_back(static_cast<char>(bom[1] + i1)); s.push_back(static_cast<char>(bom[2] + i2)); @@ -1012,7 +1023,7 @@ TEST_CASE("deserialization") SECTION("SAX and early abort") { - std::string s = "[1, [\"string\", 43.12], null, {\"key1\": true, \"key2\": false}]"; + std::string s = R"([1, ["string", 43.12], null, {"key1": true, "key2": false}])"; SaxEventLogger default_logger; SaxEventLoggerExitAfterStartObject exit_after_start_object; diff --git a/test/src/unit-msgpack.cpp b/test/src/unit-msgpack.cpp index d3c60cf9ad..31a22fedcf 100644 --- a/test/src/unit-msgpack.cpp +++ b/test/src/unit-msgpack.cpp @@ -115,7 +115,7 @@ class SaxCountdown private: int events_left = 0; }; -} +} // namespace TEST_CASE("MessagePack") { @@ -485,7 +485,7 @@ TEST_CASE("MessagePack") numbers.push_back(-65536); numbers.push_back(-77777); numbers.push_back(-1048576); - numbers.push_back(-2147483648ll); + numbers.push_back(-2147483648LL); for (auto i : numbers) { CAPTURE(i) @@ -527,7 +527,7 @@ TEST_CASE("MessagePack") { std::vector<int64_t> numbers; numbers.push_back(INT64_MIN); - numbers.push_back(-2147483649ll); + numbers.push_back(-2147483649LL); for (auto i : numbers) { CAPTURE(i) diff --git a/test/src/unit-noexcept.cpp b/test/src/unit-noexcept.cpp index 7e657bf9fb..b4bd801c52 100644 --- a/test/src/unit-noexcept.cpp +++ b/test/src/unit-noexcept.cpp @@ -42,14 +42,14 @@ enum test struct pod {}; struct pod_bis {}; -void to_json(json&, pod) noexcept; -void to_json(json&, pod_bis); -void from_json(const json&, pod) noexcept; -void from_json(const json&, pod_bis); -void to_json(json&, pod) noexcept {} -void to_json(json&, pod_bis) {} -void from_json(const json&, pod) noexcept {} -void from_json(const json&, pod_bis) {} +void to_json(json& /*unused*/, pod /*unused*/) noexcept; +void to_json(json& /*unused*/, pod_bis /*unused*/); +void from_json(const json& /*unused*/, pod /*unused*/) noexcept; +void from_json(const json& /*unused*/, pod_bis /*unused*/); +void to_json(json& /*unused*/, pod /*unused*/) noexcept {} +void to_json(json& /*unused*/, pod_bis /*unused*/) {} +void from_json(const json& /*unused*/, pod /*unused*/) noexcept {} +void from_json(const
json& /*unused*/, pod_bis /*unused*/) {} static json* j = nullptr; @@ -66,7 +66,7 @@ static_assert(noexcept(json(pod{})), ""); static_assert(noexcept(j->get()), ""); static_assert(!noexcept(j->get()), ""); static_assert(noexcept(json(pod{})), ""); -} +} // namespace TEST_CASE("runtime checks") { diff --git a/test/src/unit-regression1.cpp b/test/src/unit-regression1.cpp index df660ddb4b..211fc187c5 100644 --- a/test/src/unit-regression1.cpp +++ b/test/src/unit-regression1.cpp @@ -104,7 +104,7 @@ struct foo_serializer < T, typename std::enable_if < !std::is_same::valu ::nlohmann::from_json(j, value); } }; -} +} // namespace ns using foo_json = nlohmann::basic_json>; @@ -127,7 +127,7 @@ struct nocopy j = {{"val", n.val}}; } }; -} +} // namespace TEST_CASE("regression tests 1") { @@ -135,7 +135,7 @@ TEST_CASE("regression tests 1") { SECTION("escape_doublequote") { - auto s = "[\"\\\"foo\\\"\"]"; + const auto* s = "[\"\\\"foo\\\"\"]"; json j = json::parse(s); auto expected = R"(["\"foo\""])"_json; CHECK(j == expected); @@ -245,7 +245,7 @@ TEST_CASE("regression tests 1") SECTION("issue #82 - lexer::get_number return NAN") { - const auto content = R"( + const auto* const content = R"( { "Test":"Test1", "Number":100, @@ -633,7 +633,7 @@ TEST_CASE("regression tests 1") SECTION("issue #306 - Parsing fails without space at end of file") { - for (auto filename : + for (const auto* filename : { TEST_DATA_DIRECTORY "/regression/broken_file.json", TEST_DATA_DIRECTORY "/regression/working_file.json" @@ -648,7 +648,7 @@ TEST_CASE("regression tests 1") SECTION("issue #310 - make json_benchmarks no longer working in 2.0.4") { - for (auto filename : + for (const auto* filename : { TEST_DATA_DIRECTORY "/regression/floats.json", TEST_DATA_DIRECTORY "/regression/signed_ints.json", @@ -1385,8 +1385,10 @@ TEST_CASE("regression tests 1") { SECTION("example 1") { - std::istringstream i1_2_3( "{\"first\": \"one\" }{\"second\": \"two\"}3" ); - json j1, j2, j3; + std::istringstream i1_2_3( R"({"first": "one" }{"second": "two"}3)" ); + json j1; + json j2; + json j3; i1_2_3 >> j1; i1_2_3 >> j2; i1_2_3 >> j3; @@ -1524,7 +1526,7 @@ TEST_CASE("regression tests 1") SECTION("issue #971 - Add a SAX parser - late bug") { // a JSON text - auto text = R"( + const auto* text = R"( { "Image": { "Width": 800, @@ -1545,14 +1547,7 @@ TEST_CASE("regression tests 1") json::parser_callback_t cb = [](int /*depth*/, json::parse_event_t event, json & parsed) { // skip object elements with key "Thumbnail" - if (event == json::parse_event_t::key && parsed == json("Thumbnail")) - { - return false; - } - else - { - return true; - } + return !(event == json::parse_event_t::key && parsed == json("Thumbnail")); }; // parse (with callback) and serialize JSON diff --git a/test/src/unit-regression2.cpp b/test/src/unit-regression2.cpp index 5ea8de63d1..e02f575eff 100644 --- a/test/src/unit-regression2.cpp +++ b/test/src/unit-regression2.cpp @@ -37,11 +37,9 @@ DOCTEST_GCC_SUPPRESS_WARNING("-Wfloat-equal") #include using nlohmann::json; -#include -#include #include #include -#include +#include #if (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464 #define JSON_HAS_CPP_17 @@ -68,7 +66,7 @@ namespace { struct NonDefaultFromJsonStruct { }; -inline bool operator== (NonDefaultFromJsonStruct const&, NonDefaultFromJsonStruct const&) +inline bool operator== (NonDefaultFromJsonStruct const& /*unused*/, NonDefaultFromJsonStruct const& /*unused*/) { return true; } @@ -80,7 +78,7 @@ 
NLOHMANN_JSON_SERIALIZE_ENUM(for_1647, {for_1647::one, "one"}, {for_1647::two, "two"}, }) -} +} // namespace ///////////////////////////////////////////////////////////////////// // for #1299 @@ -89,7 +87,7 @@ NLOHMANN_JSON_SERIALIZE_ENUM(for_1647, struct Data { Data() = default; - Data(const std::string& a_, const std::string b_) : a(a_), b(b_) {} + Data(std::string a_, const std::string b_) : a(std::move(a_)), b(b_) {} std::string a {}; std::string b {}; }; diff --git a/test/src/unit-serialization.cpp b/test/src/unit-serialization.cpp index 3a733d2ff4..2247d73a95 100644 --- a/test/src/unit-serialization.cpp +++ b/test/src/unit-serialization.cpp @@ -183,10 +183,10 @@ TEST_CASE("serialization") CHECK(to_string(j) == "\"" + expected + "\""); }; - test("{\"x\":5,\"y\":6}", "{\\\"x\\\":5,\\\"y\\\":6}"); - test("{\"x\":[10,null,null,null]}", "{\\\"x\\\":[10,null,null,null]}"); + test(R"({"x":5,"y":6})", R"({\"x\":5,\"y\":6})"); + test("{\"x\":[10,null,null,null]}", R"({\"x\":[10,null,null,null]})"); test("test", "test"); - test("[3,\"false\",false]", "[3,\\\"false\\\",false]"); + test("[3,\"false\",false]", R"([3,\"false\",false])"); } } diff --git a/test/src/unit-testsuites.cpp b/test/src/unit-testsuites.cpp index da8b27d78f..88bb272d69 100644 --- a/test/src/unit-testsuites.cpp +++ b/test/src/unit-testsuites.cpp @@ -41,7 +41,7 @@ TEST_CASE("compliance tests from json.org") SECTION("expected failures") { - for (auto filename : + for (const auto* filename : { //TEST_DATA_DIRECTORY "/json_tests/fail1.json", TEST_DATA_DIRECTORY "/json_tests/fail2.json", @@ -90,7 +90,7 @@ TEST_CASE("compliance tests from json.org") // these tests fail above, because the parser does not end on EOF; // they succeed when the operator>> is used, because it does not // have this constraint - for (auto filename : + for (const auto* filename : { TEST_DATA_DIRECTORY "/json_tests/fail7.json", TEST_DATA_DIRECTORY "/json_tests/fail8.json", @@ -106,7 +106,7 @@ TEST_CASE("compliance tests from json.org") SECTION("expected passes") { - for (auto filename : + for (const auto* filename : { TEST_DATA_DIRECTORY "/json_tests/pass1.json", TEST_DATA_DIRECTORY "/json_tests/pass2.json", @@ -272,20 +272,20 @@ TEST_CASE("compliance tests from nativejson-benchmark") TEST_STRING("[\"\"]", ""); TEST_STRING("[\"Hello\"]", "Hello"); - TEST_STRING("[\"Hello\\nWorld\"]", "Hello\nWorld"); + TEST_STRING(R"(["Hello\nWorld"])", "Hello\nWorld"); //TEST_STRING("[\"Hello\\u0000World\"]", "Hello\0World"); - TEST_STRING("[\"\\\"\\\\/\\b\\f\\n\\r\\t\"]", "\"\\/\b\f\n\r\t"); - TEST_STRING("[\"\\u0024\"]", "\x24"); // Dollar sign U+0024 - TEST_STRING("[\"\\u00A2\"]", "\xC2\xA2"); // Cents sign U+00A2 - TEST_STRING("[\"\\u20AC\"]", "\xE2\x82\xAC"); // Euro sign U+20AC - TEST_STRING("[\"\\uD834\\uDD1E\"]", "\xF0\x9D\x84\x9E"); // G clef sign U+1D11E + TEST_STRING(R"(["\"\\/\b\f\n\r\t"])", "\"\\/\b\f\n\r\t"); + TEST_STRING(R"(["\u0024"])", "$"); // Dollar sign U+0024 + TEST_STRING(R"(["\u00A2"])", "\xC2\xA2"); // Cents sign U+00A2 + TEST_STRING(R"(["\u20AC"])", "\xE2\x82\xAC"); // Euro sign U+20AC + TEST_STRING(R"(["\uD834\uDD1E"])", "\xF0\x9D\x84\x9E"); // G clef sign U+1D11E } SECTION("roundtrip") { // test cases are from https://github.com/miloyip/nativejson-benchmark/tree/master/test/data/roundtrip - for (auto filename : + for (const auto* filename : { TEST_DATA_DIRECTORY "/json_roundtrip/roundtrip01.json", TEST_DATA_DIRECTORY "/json_roundtrip/roundtrip02.json", @@ -441,7 +441,7 @@ TEST_CASE("RFC 7159 examples") SECTION("13 Examples") { { - auto 
json_contents = R"( + const auto* json_contents = R"( { "Image": { "Width": 800, @@ -462,7 +462,7 @@ TEST_CASE("RFC 7159 examples") } { - auto json_contents = R"( + const auto* json_contents = R"( [ { "precision": "zip", @@ -500,7 +500,7 @@ TEST_CASE("nst's JSONTestSuite") { SECTION("y") { - for (auto filename : + for (const auto* filename : { TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_array_arraysWithSpaces.json", TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_array_empty-string.json", @@ -610,7 +610,7 @@ TEST_CASE("nst's JSONTestSuite") SECTION("n") { - for (auto filename : + for (const auto* filename : { TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/n_array_1_true_without_comma.json", TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/n_array_a_invalid_utf8.json", @@ -822,7 +822,7 @@ TEST_CASE("nst's JSONTestSuite") // these tests fail above, because the parser does not end on EOF; // they succeed when the operator>> is used, because it does not // have this constraint - for (auto filename : + for (const auto* filename : { TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/n_array_comma_after_close.json", TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/n_array_extra_close.json", @@ -852,7 +852,7 @@ TEST_CASE("nst's JSONTestSuite") SECTION("i -> y") { - for (auto filename : + for (const auto* filename : { // we do not pose a limit on nesting TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/i_structure_500_nested_arrays.json", @@ -876,7 +876,7 @@ TEST_CASE("nst's JSONTestSuite") // numbers that overflow during parsing SECTION("i/y -> n (out of range)") { - for (auto filename : + for (const auto* filename : { TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/i_number_neg_int_huge_exp.json", TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/i_number_pos_double_huge_exp.json", @@ -895,7 +895,7 @@ TEST_CASE("nst's JSONTestSuite") SECTION("i -> n") { - for (auto filename : + for (const auto* filename : { TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/i_object_key_lone_2nd_surrogate.json", TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/i_string_1st_surrogate_but_2nd_missing.json", @@ -928,7 +928,7 @@ TEST_CASE("nst's JSONTestSuite (2)") { SECTION("y") { - for (auto filename : + for (const auto* filename : { TEST_DATA_DIRECTORY "/nst_json_testsuite2/test_parsing/y_array_arraysWithSpaces.json", TEST_DATA_DIRECTORY "/nst_json_testsuite2/test_parsing/y_array_empty-string.json", @@ -1039,7 +1039,7 @@ TEST_CASE("nst's JSONTestSuite (2)") SECTION("n") { - for (auto filename : + for (const auto* filename : { TEST_DATA_DIRECTORY "/nst_json_testsuite2/test_parsing/n_array_1_true_without_comma.json", TEST_DATA_DIRECTORY "/nst_json_testsuite2/test_parsing/n_array_a_invalid_utf8.json", @@ -1241,7 +1241,7 @@ TEST_CASE("nst's JSONTestSuite (2)") SECTION("n (previously overflowed)") { - for (auto filename : + for (const auto* filename : { TEST_DATA_DIRECTORY "/nst_json_testsuite2/test_parsing/n_structure_100000_opening_arrays.json", TEST_DATA_DIRECTORY "/nst_json_testsuite2/test_parsing/n_structure_open_array_object.json" @@ -1256,7 +1256,7 @@ TEST_CASE("nst's JSONTestSuite (2)") SECTION("i -> y") { - for (auto filename : + for (const auto* filename : { TEST_DATA_DIRECTORY "/nst_json_testsuite2/test_parsing/i_number_double_huge_neg_exp.json", //TEST_DATA_DIRECTORY "/nst_json_testsuite2/test_parsing/i_number_huge_exp.json", @@ -1307,7 +1307,7 @@ TEST_CASE("nst's JSONTestSuite (2)") SECTION("i -> n") { - for (auto filename : + for (const auto* 
filename : { //TEST_DATA_DIRECTORY "/nst_json_testsuite2/test_parsing/i_number_double_huge_neg_exp.json", TEST_DATA_DIRECTORY "/nst_json_testsuite2/test_parsing/i_number_huge_exp.json", @@ -1373,7 +1373,7 @@ std::string trim(const std::string& str) size_t last = str.find_last_not_of(' '); return str.substr(first, (last - first + 1)); } -} +} // namespace TEST_CASE("Big List of Naughty Strings") { @@ -1399,7 +1399,7 @@ TEST_CASE("Big List of Naughty Strings") line = trim(line); // remove trailing comma - line = line.substr(0, line.find_last_of(",")); + line = line.substr(0, line.find_last_of(',')); // discard lines without at least two characters (quotes) if (line.size() < 2) diff --git a/test/src/unit-to_chars.cpp b/test/src/unit-to_chars.cpp index b94b087559..8ae05cf952 100644 --- a/test/src/unit-to_chars.cpp +++ b/test/src/unit-to_chars.cpp @@ -54,7 +54,7 @@ float make_float(uint32_t sign_bit, uint32_t biased_exponent, uint32_t significa } // ldexp -- convert f * 2^e to IEEE single precision -static float make_float(uint64_t f, int e) +float make_float(uint64_t f, int e) { constexpr uint64_t kHiddenBit = 0x00800000; constexpr uint64_t kSignificandMask = 0x007FFFFF; @@ -90,7 +90,7 @@ float make_float(uint64_t f, int e) return reinterpret_bits<float>(static_cast<uint32_t>(bits)); } -static double make_double(uint64_t sign_bit, uint64_t biased_exponent, uint64_t significand) +double make_double(uint64_t sign_bit, uint64_t biased_exponent, uint64_t significand) { assert(sign_bit == 0 || sign_bit == 1); assert(biased_exponent <= 0x7FF); @@ -106,7 +106,7 @@ double make_double(uint64_t sign_bit, uint64_t biased_exponent, uint64_t } // ldexp -- convert f * 2^e to IEEE double precision -static double make_double(uint64_t f, int e) +double make_double(uint64_t f, int e) { constexpr uint64_t kHiddenBit = 0x0010000000000000; constexpr uint64_t kSignificandMask = 0x000FFFFFFFFFFFFF; @@ -141,7 +141,7 @@ double make_double(uint64_t f, int e) uint64_t bits = (f & kSignificandMask) | (biased_exponent << kPhysicalSignificandSize); return reinterpret_bits<double>(bits); } -} +} // namespace TEST_CASE("digit gen") { diff --git a/test/src/unit-ubjson.cpp b/test/src/unit-ubjson.cpp index f60477a825..3abb644e1c 100644 --- a/test/src/unit-ubjson.cpp +++ b/test/src/unit-ubjson.cpp @@ -302,7 +302,7 @@ TEST_CASE("UBJSON") // check individual bytes CHECK(result[0] == 'I'); - int16_t restored = static_cast<int16_t>(((result[1] << 8) + result[2])); + auto restored = static_cast<int16_t>(((result[1] << 8) + result[2])); CHECK(restored == i); // roundtrip @@ -323,7 +323,7 @@ TEST_CASE("UBJSON") // check individual bytes CHECK(result[0] == 'I'); - int16_t restored = static_cast<int16_t>(((result[1] << 8) + result[2])); + auto restored = static_cast<int16_t>(((result[1] << 8) + result[2])); CHECK(restored == -9263); // roundtrip @@ -455,7 +455,7 @@ TEST_CASE("UBJSON") // check individual bytes CHECK(result[0] == 'I'); - uint16_t restored = static_cast<uint16_t>(static_cast<uint8_t>(result[1]) * 256 + static_cast<uint8_t>(result[2])); + auto restored = static_cast<uint16_t>(static_cast<uint8_t>(result[1]) * 256 + static_cast<uint8_t>(result[2])); CHECK(restored == i); // roundtrip @@ -583,7 +583,7 @@ TEST_CASE("UBJSON") // check individual bytes CHECK(result[0] == 'i'); - uint8_t restored = static_cast<uint8_t>(result[1]); + auto restored = static_cast<uint8_t>(result[1]); CHECK(restored == i); // roundtrip @@ -616,7 +616,7 @@ TEST_CASE("UBJSON") // check individual bytes CHECK(result[0] == 'U'); - uint8_t restored = static_cast<uint8_t>(result[1]); + auto restored = static_cast<uint8_t>(result[1]); CHECK(restored == i); // roundtrip @@ -650,7 +650,7 @@ TEST_CASE("UBJSON") // check individual bytes CHECK(result[0] == 'I'); - uint16_t restored = static_cast<uint16_t>(static_cast<uint8_t>(result[1]) * 256 + static_cast<uint8_t>(result[2])); + auto restored = static_cast<uint16_t>(static_cast<uint8_t>(result[1]) * 256 + static_cast<uint8_t>(result[2])); CHECK(restored == i); // roundtrip @@ -1535,7 +1535,7 @@ TEST_CASE("UBJSON") { SECTION("size=false type=false") { - json j = json::parse("{\"a\": {\"b\": {\"c\": {}}}}"); + json j = json::parse(R"({"a": {"b": {"c": {}}}})"); std::vector<uint8_t> expected = { '{', 'i', 1, 'a', '{', 'i', 1, 'b', '{', 'i', 1, 'c', '{', '}', '}', '}', '}' }; @@ -1550,7 +1550,7 @@ TEST_CASE("UBJSON") SECTION("size=true type=false") { - json j = json::parse("{\"a\": {\"b\": {\"c\": {}}}}"); + json j = json::parse(R"({"a": {"b": {"c": {}}}})"); std::vector<uint8_t> expected = { '{', '#', 'i', 1, 'i', 1, 'a', '{', '#', 'i', 1, 'i', 1, 'b', '{', '#', 'i', 1, 'i', 1, 'c', '{', '#', 'i', 0 }; @@ -1565,7 +1565,7 @@ TEST_CASE("UBJSON") SECTION("size=true type=true") { - json j = json::parse("{\"a\": {\"b\": {\"c\": {}}}}"); + json j = json::parse(R"({"a": {"b": {"c": {}}}})"); std::vector<uint8_t> expected = { '{', '$', '{', '#', 'i', 1, 'i', 1, 'a', '$', '{', '#', 'i', 1, 'i', 1, 'b', '$', '{', '#', 'i', 1, 'i', 1, 'c', '#', 'i', 0 }; @@ -1624,7 +1624,7 @@ TEST_CASE("UBJSON") CHECK_THROWS_AS(_ = json::from_ubjson(v_ubjson), json::out_of_range&); json j; - nlohmann::detail::json_sax_dom_callback_parser<json> scp(j, [](int, json::parse_event_t, const json&) + nlohmann::detail::json_sax_dom_callback_parser<json> scp(j, [](int /*unused*/, json::parse_event_t /*unused*/, const json& /*unused*/) { return true; }); @@ -2439,7 +2439,7 @@ TEST_CASE("all UBJSON first bytes") // check that parse_error.112 is only thrown if the // first byte is not in the supported set INFO_WITH_TEMP(e.what()); - if (std::find(supported.begin(), supported.end(), byte) == supported.end()) + if (supported.find(byte) == supported.end()) { CHECK(e.id == 112); } diff --git a/test/src/unit-udt.cpp b/test/src/unit-udt.cpp index 6c810af76b..39588a6dda 100644 --- a/test/src/unit-udt.cpp +++ b/test/src/unit-udt.cpp @@ -32,7 +32,6 @@ SOFTWARE.
#include using nlohmann::json; -#include #include #include #include From 866a4c56f03a118dfef3134a514ea9d4f1620230 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 12:49:15 +0100 Subject: [PATCH 058/113] :rotating_light: suppress some unhelpful warnings --- .clang-tidy | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.clang-tidy b/.clang-tidy index 395647e8b6..88e89f31e3 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1,4 +1,5 @@ Checks: '*, + -android-cloexec-fopen, -cppcoreguidelines-avoid-goto, -cppcoreguidelines-avoid-magic-numbers, -cppcoreguidelines-avoid-non-const-global-variables, @@ -15,6 +16,7 @@ Checks: '*, -hicpp-function-size, -hicpp-no-array-decay, -hicpp-no-assembler, + -hicpp-signed-bitwise, -hicpp-uppercase-literal-suffix, -llvm-header-guard, -llvm-include-order, From 675f07f5e9ac9a5d86fea8a742fe529de258c73b Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 13:51:43 +0100 Subject: [PATCH 059/113] :rotating_light: fix warnings --- test/src/unit-allocator.cpp | 2 +- test/src/unit-capacity.cpp | 20 ++++++------ test/src/unit-cbor.cpp | 24 +++++++------- test/src/unit-class_parser.cpp | 6 ++-- test/src/unit-constructor2.cpp | 20 ++++++------ test/src/unit-items.cpp | 48 ++++++++++++++-------------- test/src/unit-noexcept.cpp | 2 +- test/src/unit-regression1.cpp | 2 +- test/src/unit-regression2.cpp | 4 +-- test/src/unit-ubjson.cpp | 44 ++++++++++++------------- test/src/unit-udt.cpp | 4 +-- test/src/unit-user_defined_input.cpp | 8 ++--- 12 files changed, 92 insertions(+), 92 deletions(-) diff --git a/test/src/unit-allocator.cpp b/test/src/unit-allocator.cpp index f432ac0ad2..962828d201 100644 --- a/test/src/unit-allocator.cpp +++ b/test/src/unit-allocator.cpp @@ -235,7 +235,7 @@ struct allocator_no_forward : std::allocator { allocator_no_forward() = default; template - allocator_no_forward(allocator_no_forward) {} + allocator_no_forward(allocator_no_forward /*unused*/) {} template struct rebind diff --git a/test/src/unit-capacity.cpp b/test/src/unit-capacity.cpp index 9be901bf90..eeee85f1e5 100644 --- a/test/src/unit-capacity.cpp +++ b/test/src/unit-capacity.cpp @@ -437,7 +437,7 @@ TEST_CASE("capacity") SECTION("boolean") { json j = true; - const json j_const(j); + const json j_const = true; SECTION("result of max_size") { @@ -449,7 +449,7 @@ TEST_CASE("capacity") SECTION("string") { json j = "hello world"; - const json j_const(j); + const json j_const = "hello world"; SECTION("result of max_size") { @@ -463,7 +463,7 @@ TEST_CASE("capacity") SECTION("empty array") { json j = json::array(); - const json j_const(j); + const json j_const = json::array(); SECTION("result of max_size") { @@ -475,7 +475,7 @@ TEST_CASE("capacity") SECTION("filled array") { json j = {1, 2, 3}; - const json j_const(j); + const json j_const = {1, 2, 3}; SECTION("result of max_size") { @@ -490,7 +490,7 @@ TEST_CASE("capacity") SECTION("empty object") { json j = json::object(); - const json j_const(j); + const json j_const = json::object(); SECTION("result of max_size") { @@ -502,7 +502,7 @@ TEST_CASE("capacity") SECTION("filled object") { json j = {{"one", 1}, {"two", 2}, {"three", 3}}; - const json j_const(j); + const json j_const = {{"one", 1}, {"two", 2}, {"three", 3}}; SECTION("result of max_size") { @@ -515,7 +515,7 @@ TEST_CASE("capacity") SECTION("number (integer)") { json j = -23; - const json j_const(j); + const json j_const = -23; SECTION("result of max_size") { @@ -527,7 +527,7 @@ TEST_CASE("capacity") SECTION("number (unsigned)") { json j = 23u; - const 
json j_const(j); + const json j_const = 23u; SECTION("result of max_size") { @@ -539,7 +539,7 @@ TEST_CASE("capacity") SECTION("number (float)") { json j = 23.42; - const json j_const(j); + const json j_const = 23.42; SECTION("result of max_size") { @@ -551,7 +551,7 @@ TEST_CASE("capacity") SECTION("null") { json j = nullptr; - const json j_const(j); + const json j_const = nullptr; SECTION("result of max_size") { diff --git a/test/src/unit-cbor.cpp b/test/src/unit-cbor.cpp index 6a8e709c18..bfbcf54044 100644 --- a/test/src/unit-cbor.cpp +++ b/test/src/unit-cbor.cpp @@ -54,42 +54,42 @@ class SaxCountdown return events_left-- > 0; } - bool boolean(bool) + bool boolean(bool /*unused*/) { return events_left-- > 0; } - bool number_integer(json::number_integer_t) + bool number_integer(json::number_integer_t /*unused*/) { return events_left-- > 0; } - bool number_unsigned(json::number_unsigned_t) + bool number_unsigned(json::number_unsigned_t /*unused*/) { return events_left-- > 0; } - bool number_float(json::number_float_t, const std::string&) + bool number_float(json::number_float_t /*unused*/, const std::string& /*unused*/) { return events_left-- > 0; } - bool string(std::string&) + bool string(std::string& /*unused*/) { return events_left-- > 0; } - bool binary(std::vector&) + bool binary(std::vector& /*unused*/) { return events_left-- > 0; } - bool start_object(std::size_t) + bool start_object(std::size_t /*unused*/) { return events_left-- > 0; } - bool key(std::string&) + bool key(std::string& /*unused*/) { return events_left-- > 0; } @@ -99,7 +99,7 @@ class SaxCountdown return events_left-- > 0; } - bool start_array(std::size_t) + bool start_array(std::size_t /*unused*/) { return events_left-- > 0; } @@ -109,7 +109,7 @@ class SaxCountdown return events_left-- > 0; } - bool parse_error(std::size_t, const std::string&, const json::exception&) + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const json::exception& /*unused*/) { return false; } @@ -117,7 +117,7 @@ class SaxCountdown private: int events_left = 0; }; -} +} // namespace TEST_CASE("CBOR") { @@ -294,7 +294,7 @@ TEST_CASE("CBOR") (static_cast(result[3]) << 010) + static_cast(result[4]); CHECK(restored == positive); - CHECK(-1ll - restored == i); + CHECK(-1LL - restored == i); // roundtrip CHECK(json::from_cbor(result) == j); diff --git a/test/src/unit-class_parser.cpp b/test/src/unit-class_parser.cpp index c8255a4bf7..6701f5f4e9 100644 --- a/test/src/unit-class_parser.cpp +++ b/test/src/unit-class_parser.cpp @@ -395,7 +395,7 @@ TEST_CASE("parser class") CHECK_THROWS_AS(parser_helper("\uFF01"), json::parse_error&); CHECK_THROWS_AS(parser_helper("[-4:1,]"), json::parse_error&); // unescaped control characters - CHECK_THROWS_AS(parser_helper("\"\x00\""), json::parse_error&); + CHECK_THROWS_AS(parser_helper("\"\x00\""), json::parse_error&); // NOLINT(bugprone-string-literal-with-embedded-nul) CHECK_THROWS_AS(parser_helper("\"\x01\""), json::parse_error&); CHECK_THROWS_AS(parser_helper("\"\x02\""), json::parse_error&); CHECK_THROWS_AS(parser_helper("\"\x03\""), json::parse_error&); @@ -427,7 +427,7 @@ TEST_CASE("parser class") CHECK_THROWS_AS(parser_helper("\"\x1d\""), json::parse_error&); CHECK_THROWS_AS(parser_helper("\"\x1e\""), json::parse_error&); CHECK_THROWS_AS(parser_helper("\"\x1f\""), json::parse_error&); - CHECK_THROWS_WITH(parser_helper("\"\x00\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: missing closing quote; last read: 
'\"'"); + CHECK_THROWS_WITH(parser_helper("\"\x00\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: missing closing quote; last read: '\"'"); // NOLINT(bugprone-string-literal-with-embedded-nul) CHECK_THROWS_WITH(parser_helper("\"\x01\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0001 (SOH) must be escaped to \\u0001; last read: '\"'"); CHECK_THROWS_WITH(parser_helper("\"\x02\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0002 (STX) must be escaped to \\u0002; last read: '\"'"); CHECK_THROWS_WITH(parser_helper("\"\x03\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0003 (ETX) must be escaped to \\u0003; last read: '\"'"); @@ -770,7 +770,7 @@ TEST_CASE("parser class") CHECK(accept_helper("\uFF01") == false); CHECK(accept_helper("[-4:1,]") == false); // unescaped control characters - CHECK(accept_helper("\"\x00\"") == false); + CHECK(accept_helper("\"\x00\"") == false); // NOLINT(bugprone-string-literal-with-embedded-nul) CHECK(accept_helper("\"\x01\"") == false); CHECK(accept_helper("\"\x02\"") == false); CHECK(accept_helper("\"\x03\"") == false); diff --git a/test/src/unit-constructor2.cpp b/test/src/unit-constructor2.cpp index a32ad2eefc..eb3ab9207b 100644 --- a/test/src/unit-constructor2.cpp +++ b/test/src/unit-constructor2.cpp @@ -39,63 +39,63 @@ TEST_CASE("other constructors and destructor") SECTION("object") { json j {{"foo", 1}, {"bar", false}}; - json k(j); + json k(j); // NOLINT(performance-unnecessary-copy-initialization) CHECK(j == k); } SECTION("array") { json j {"foo", 1, 42.23, false}; - json k(j); + json k(j); // NOLINT(performance-unnecessary-copy-initialization) CHECK(j == k); } SECTION("null") { json j(nullptr); - json k(j); + json k(j); // NOLINT(performance-unnecessary-copy-initialization) CHECK(j == k); } SECTION("boolean") { json j(true); - json k(j); + json k(j); // NOLINT(performance-unnecessary-copy-initialization) CHECK(j == k); } SECTION("string") { json j("Hello world"); - json k(j); + json k(j); // NOLINT(performance-unnecessary-copy-initialization) CHECK(j == k); } SECTION("number (integer)") { json j(42); - json k(j); + json k(j); // NOLINT(performance-unnecessary-copy-initialization) CHECK(j == k); } SECTION("number (unsigned)") { json j(42u); - json k(j); + json k(j); // NOLINT(performance-unnecessary-copy-initialization) CHECK(j == k); } SECTION("number (floating-point)") { json j(42.23); - json k(j); + json k(j); // NOLINT(performance-unnecessary-copy-initialization) CHECK(j == k); } SECTION("binary") { json j = json::binary({1, 2, 3}); - json k(j); + json k(j); // NOLINT(performance-unnecessary-copy-initialization) CHECK(j == k); } } @@ -106,7 +106,7 @@ TEST_CASE("other constructors and destructor") CHECK(j.type() == json::value_t::object); json k(std::move(j)); CHECK(k.type() == json::value_t::object); - CHECK(j.type() == json::value_t::null); + CHECK(j.type() == json::value_t::null); // NOLINT: access after move is OK here } SECTION("copy assignment") diff --git a/test/src/unit-items.cpp b/test/src/unit-items.cpp index 4caf76f237..6ecd90d7b3 100644 --- a/test/src/unit-items.cpp +++ b/test/src/unit-items.cpp @@ -48,7 +48,7 @@ TEST_CASE("iterator_wrapper") json j = { {"A", 1}, {"B", 2} }; int counter 
= 1; - for (auto i : json::iterator_wrapper(j)) + for (auto i : json::iterator_wrapper(j)) // NOLINT(performance-for-range-copy) { switch (counter++) { @@ -125,7 +125,7 @@ TEST_CASE("iterator_wrapper") json j = { {"A", 1}, {"B", 2} }; int counter = 1; - for (const auto i : json::iterator_wrapper(j)) + for (const auto i : json::iterator_wrapper(j)) // NOLINT(performance-for-range-copy) { switch (counter++) { @@ -194,7 +194,7 @@ TEST_CASE("iterator_wrapper") const json j = { {"A", 1}, {"B", 2} }; int counter = 1; - for (auto i : json::iterator_wrapper(j)) + for (auto i : json::iterator_wrapper(j)) // NOLINT(performance-for-range-copy) { switch (counter++) { @@ -260,7 +260,7 @@ TEST_CASE("iterator_wrapper") const json j = { {"A", 1}, {"B", 2} }; int counter = 1; - for (const auto i : json::iterator_wrapper(j)) + for (const auto i : json::iterator_wrapper(j)) // NOLINT(performance-for-range-copy) { switch (counter++) { @@ -329,7 +329,7 @@ TEST_CASE("iterator_wrapper") json j = { "A", "B" }; int counter = 1; - for (auto i : json::iterator_wrapper(j)) + for (auto i : json::iterator_wrapper(j)) // NOLINT(performance-for-range-copy) { switch (counter++) { @@ -406,7 +406,7 @@ TEST_CASE("iterator_wrapper") json j = { "A", "B" }; int counter = 1; - for (const auto i : json::iterator_wrapper(j)) + for (const auto i : json::iterator_wrapper(j)) // NOLINT(performance-for-range-copy) { switch (counter++) { @@ -475,7 +475,7 @@ TEST_CASE("iterator_wrapper") const json j = { "A", "B" }; int counter = 1; - for (auto i : json::iterator_wrapper(j)) + for (auto i : json::iterator_wrapper(j)) // NOLINT(performance-for-range-copy) { switch (counter++) { @@ -541,7 +541,7 @@ TEST_CASE("iterator_wrapper") const json j = { "A", "B" }; int counter = 1; - for (const auto i : json::iterator_wrapper(j)) + for (const auto i : json::iterator_wrapper(j)) // NOLINT(performance-for-range-copy) { switch (counter++) { @@ -610,7 +610,7 @@ TEST_CASE("iterator_wrapper") json j = 1; int counter = 1; - for (auto i : json::iterator_wrapper(j)) + for (auto i : json::iterator_wrapper(j)) // NOLINT(performance-for-range-copy) { ++counter; CHECK(i.key() == ""); @@ -646,7 +646,7 @@ TEST_CASE("iterator_wrapper") json j = 1; int counter = 1; - for (const auto i : json::iterator_wrapper(j)) + for (const auto i : json::iterator_wrapper(j)) // NOLINT(performance-for-range-copy) { ++counter; CHECK(i.key() == ""); @@ -679,7 +679,7 @@ TEST_CASE("iterator_wrapper") const json j = 1; int counter = 1; - for (auto i : json::iterator_wrapper(j)) + for (auto i : json::iterator_wrapper(j)) // NOLINT(performance-for-range-copy) { ++counter; CHECK(i.key() == ""); @@ -709,7 +709,7 @@ TEST_CASE("iterator_wrapper") const json j = 1; int counter = 1; - for (const auto i : json::iterator_wrapper(j)) + for (const auto i : json::iterator_wrapper(j)) // NOLINT(performance-for-range-copy) { ++counter; CHECK(i.key() == ""); @@ -745,7 +745,7 @@ TEST_CASE("items()") json j = { {"A", 1}, {"B", 2} }; int counter = 1; - for (auto i : j.items()) + for (auto i : j.items()) // NOLINT(performance-for-range-copy) { switch (counter++) { @@ -822,7 +822,7 @@ TEST_CASE("items()") json j = { {"A", 1}, {"B", 2} }; int counter = 1; - for (const auto i : j.items()) + for (const auto i : j.items()) // NOLINT(performance-for-range-copy) { switch (counter++) { @@ -907,7 +907,7 @@ TEST_CASE("items()") const json j = { {"A", 1}, {"B", 2} }; int counter = 1; - for (auto i : j.items()) + for (auto i : j.items()) // NOLINT(performance-for-range-copy) { switch (counter++) { @@ -973,7 +973,7 
@@ TEST_CASE("items()") const json j = { {"A", 1}, {"B", 2} }; int counter = 1; - for (const auto i : j.items()) + for (const auto i : j.items()) // NOLINT(performance-for-range-copy) { switch (counter++) { @@ -1042,7 +1042,7 @@ TEST_CASE("items()") json j = { "A", "B" }; int counter = 1; - for (auto i : j.items()) + for (auto i : j.items()) // NOLINT(performance-for-range-copy) { switch (counter++) { @@ -1119,7 +1119,7 @@ TEST_CASE("items()") json j = { "A", "B" }; int counter = 1; - for (const auto i : j.items()) + for (const auto i : j.items()) // NOLINT(performance-for-range-copy) { switch (counter++) { @@ -1188,7 +1188,7 @@ TEST_CASE("items()") const json j = { "A", "B" }; int counter = 1; - for (auto i : j.items()) + for (auto i : j.items()) // NOLINT(performance-for-range-copy) { switch (counter++) { @@ -1254,7 +1254,7 @@ TEST_CASE("items()") const json j = { "A", "B" }; int counter = 1; - for (const auto i : j.items()) + for (const auto i : j.items()) // NOLINT(performance-for-range-copy) { switch (counter++) { @@ -1323,7 +1323,7 @@ TEST_CASE("items()") json j = 1; int counter = 1; - for (auto i : j.items()) + for (auto i : j.items()) // NOLINT(performance-for-range-copy) { ++counter; CHECK(i.key() == ""); @@ -1359,7 +1359,7 @@ TEST_CASE("items()") json j = 1; int counter = 1; - for (const auto i : j.items()) + for (const auto i : j.items()) // NOLINT(performance-for-range-copy) { ++counter; CHECK(i.key() == ""); @@ -1392,7 +1392,7 @@ TEST_CASE("items()") const json j = 1; int counter = 1; - for (auto i : j.items()) + for (auto i : j.items()) // NOLINT(performance-for-range-copy) { ++counter; CHECK(i.key() == ""); @@ -1422,7 +1422,7 @@ TEST_CASE("items()") const json j = 1; int counter = 1; - for (const auto i : j.items()) + for (const auto i : j.items()) // NOLINT(performance-for-range-copy) { ++counter; CHECK(i.key() == ""); diff --git a/test/src/unit-noexcept.cpp b/test/src/unit-noexcept.cpp index b4bd801c52..25de3410f6 100644 --- a/test/src/unit-noexcept.cpp +++ b/test/src/unit-noexcept.cpp @@ -51,7 +51,7 @@ void to_json(json& /*unused*/, pod_bis /*unused*/) {} void from_json(const json& /*unused*/, pod /*unused*/) noexcept {} void from_json(const json& /*unused*/, pod_bis /*unused*/) {} -static json* j = nullptr; +json* j = nullptr; static_assert(noexcept(json{}), ""); static_assert(noexcept(nlohmann::to_json(*j, 2)), ""); diff --git a/test/src/unit-regression1.cpp b/test/src/unit-regression1.cpp index 211fc187c5..d50fad10c6 100644 --- a/test/src/unit-regression1.cpp +++ b/test/src/unit-regression1.cpp @@ -135,7 +135,7 @@ TEST_CASE("regression tests 1") { SECTION("escape_doublequote") { - const auto* s = "[\"\\\"foo\\\"\"]"; + const auto* s = R"(["\"foo\""])"; json j = json::parse(s); auto expected = R"(["\"foo\""])"_json; CHECK(j == expected); diff --git a/test/src/unit-regression2.cpp b/test/src/unit-regression2.cpp index e02f575eff..6dd6e53d5a 100644 --- a/test/src/unit-regression2.cpp +++ b/test/src/unit-regression2.cpp @@ -87,7 +87,7 @@ NLOHMANN_JSON_SERIALIZE_ENUM(for_1647, struct Data { Data() = default; - Data(std::string a_, const std::string b_) : a(std::move(a_)), b(b_) {} + Data(std::string a_, std::string b_) : a(std::move(a_)), b(std::move(b_)) {} std::string a {}; std::string b {}; }; @@ -115,7 +115,7 @@ namespace nlohmann template <> struct adl_serializer { - static NonDefaultFromJsonStruct from_json (json const&) noexcept + static NonDefaultFromJsonStruct from_json (json const& /*unused*/) noexcept { return {}; } diff --git a/test/src/unit-ubjson.cpp 
b/test/src/unit-ubjson.cpp index 3abb644e1c..47fcf24496 100644 --- a/test/src/unit-ubjson.cpp +++ b/test/src/unit-ubjson.cpp @@ -51,42 +51,42 @@ class SaxCountdown return events_left-- > 0; } - bool boolean(bool) + bool boolean(bool /*unused*/) { return events_left-- > 0; } - bool number_integer(json::number_integer_t) + bool number_integer(json::number_integer_t /*unused*/) { return events_left-- > 0; } - bool number_unsigned(json::number_unsigned_t) + bool number_unsigned(json::number_unsigned_t /*unused*/) { return events_left-- > 0; } - bool number_float(json::number_float_t, const std::string&) + bool number_float(json::number_float_t /*unused*/, const std::string& /*unused*/) { return events_left-- > 0; } - bool string(std::string&) + bool string(std::string& /*unused*/) { return events_left-- > 0; } - bool binary(std::vector&) + bool binary(std::vector& /*unused*/) { return events_left-- > 0; } - bool start_object(std::size_t) + bool start_object(std::size_t /*unused*/) { return events_left-- > 0; } - bool key(std::string&) + bool key(std::string& /*unused*/) { return events_left-- > 0; } @@ -96,7 +96,7 @@ class SaxCountdown return events_left-- > 0; } - bool start_array(std::size_t) + bool start_array(std::size_t /*unused*/) { return events_left-- > 0; } @@ -106,7 +106,7 @@ class SaxCountdown return events_left-- > 0; } - bool parse_error(std::size_t, const std::string&, const json::exception&) + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const json::exception& /*unused*/) { return false; } @@ -114,7 +114,7 @@ class SaxCountdown private: int events_left = 0; }; -} +} // namespace TEST_CASE("UBJSON") { @@ -175,16 +175,16 @@ TEST_CASE("UBJSON") { std::vector numbers; numbers.push_back((std::numeric_limits::min)()); - numbers.push_back(-1000000000000000000ll); - numbers.push_back(-100000000000000000ll); - numbers.push_back(-10000000000000000ll); - numbers.push_back(-1000000000000000ll); - numbers.push_back(-100000000000000ll); - numbers.push_back(-10000000000000ll); - numbers.push_back(-1000000000000ll); - numbers.push_back(-100000000000ll); - numbers.push_back(-10000000000ll); - numbers.push_back(-2147483649ll); + numbers.push_back(-1000000000000000000LL); + numbers.push_back(-100000000000000000LL); + numbers.push_back(-10000000000000000LL); + numbers.push_back(-1000000000000000LL); + numbers.push_back(-100000000000000LL); + numbers.push_back(-10000000000000LL); + numbers.push_back(-1000000000000LL); + numbers.push_back(-100000000000LL); + numbers.push_back(-10000000000LL); + numbers.push_back(-2147483649LL); for (auto i : numbers) { CAPTURE(i) @@ -1610,7 +1610,7 @@ TEST_CASE("UBJSON") CHECK_THROWS_AS(_ = json::from_ubjson(v_ubjson), json::out_of_range&); json j; - nlohmann::detail::json_sax_dom_callback_parser scp(j, [](int, json::parse_event_t, const json&) + nlohmann::detail::json_sax_dom_callback_parser scp(j, [](int /*unused*/, json::parse_event_t /*unused*/, const json& /*unused*/) { return true; }); diff --git a/test/src/unit-udt.cpp b/test/src/unit-udt.cpp index 39588a6dda..623d4bb2c1 100644 --- a/test/src/unit-udt.cpp +++ b/test/src/unit-udt.cpp @@ -345,8 +345,8 @@ namespace udt { struct legacy_type { - std::string number; - legacy_type() : number() {} + std::string number{}; + legacy_type() = default; legacy_type(std::string n) : number(std::move(n)) {} }; } // namespace udt diff --git a/test/src/unit-user_defined_input.cpp b/test/src/unit-user_defined_input.cpp index 4138460057..689f450b78 100644 --- a/test/src/unit-user_defined_input.cpp +++ 
b/test/src/unit-user_defined_input.cpp @@ -60,7 +60,7 @@ const char* begin(const MyContainer& c) const char* end(const MyContainer& c) { - return c.data + strlen(c.data); + return c.data + strlen(c.data); // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic) } TEST_CASE("Custom container non-member begin/end") @@ -88,7 +88,7 @@ TEST_CASE("Custom container member begin/end") const char* end() const { - return data + strlen(data); + return data + strlen(data); // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic) } }; @@ -114,7 +114,7 @@ TEST_CASE("Custom iterator") MyIterator& operator++() { - ++ptr; + ++ptr; // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic) return *this; } @@ -139,7 +139,7 @@ TEST_CASE("Custom iterator") CHECK(std::is_same::value); MyIterator begin{raw_data}; - MyIterator end{raw_data + strlen(raw_data)}; + MyIterator end{raw_data + strlen(raw_data)}; // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic) json as_json = json::parse(begin, end); CHECK(as_json.at(0) == 1); From 28d5f863b30d09b801014d2a1acb5c5529507dd0 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 14:46:29 +0100 Subject: [PATCH 060/113] :rotating_light: fix warnings --- test/src/unit-element_access2.cpp | 68 +++++++++++++++---------------- test/src/unit-regression1.cpp | 6 +-- test/src/unit-testsuites.cpp | 6 +-- 3 files changed, 38 insertions(+), 42 deletions(-) diff --git a/test/src/unit-element_access2.cpp b/test/src/unit-element_access2.cpp index 18fc6a20ca..af2c2cac7f 100644 --- a/test/src/unit-element_access2.cpp +++ b/test/src/unit-element_access2.cpp @@ -202,7 +202,7 @@ TEST_CASE("element access 2") SECTION("null") { json j_nonobject(json::value_t::null); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::null); CHECK_THROWS_AS(j_nonobject.value("foo", 1), json::type_error&); CHECK_THROWS_AS(j_nonobject_const.value("foo", 1), json::type_error&); CHECK_THROWS_WITH(j_nonobject.value("foo", 1), @@ -214,7 +214,7 @@ TEST_CASE("element access 2") SECTION("boolean") { json j_nonobject(json::value_t::boolean); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::boolean); CHECK_THROWS_AS(j_nonobject.value("foo", 1), json::type_error&); CHECK_THROWS_AS(j_nonobject_const.value("foo", 1), json::type_error&); CHECK_THROWS_WITH(j_nonobject.value("foo", 1), @@ -226,7 +226,7 @@ TEST_CASE("element access 2") SECTION("string") { json j_nonobject(json::value_t::string); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::string); CHECK_THROWS_AS(j_nonobject.value("foo", 1), json::type_error&); CHECK_THROWS_AS(j_nonobject_const.value("foo", 1), json::type_error&); CHECK_THROWS_WITH(j_nonobject.value("foo", 1), @@ -238,7 +238,7 @@ TEST_CASE("element access 2") SECTION("array") { json j_nonobject(json::value_t::array); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::array); CHECK_THROWS_AS(j_nonobject.value("foo", 1), json::type_error&); CHECK_THROWS_AS(j_nonobject_const.value("foo", 1), json::type_error&); CHECK_THROWS_WITH(j_nonobject.value("foo", 1), @@ -250,7 +250,7 @@ TEST_CASE("element access 2") SECTION("number (integer)") { json j_nonobject(json::value_t::number_integer); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::number_integer); CHECK_THROWS_AS(j_nonobject.value("foo", 1), json::type_error&); CHECK_THROWS_AS(j_nonobject_const.value("foo", 1), json::type_error&); 
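// Aside: the j_const changes in this patch sidestep the copy that clang-tidy's
// performance-unnecessary-copy-initialization check flags, namely a const local
// that is copy-constructed from another local and never modified afterwards.
// A minimal sketch of the warning and the fix, assuming only <nlohmann/json.hpp>
// is available; the function name is illustrative, not part of the library:
#include <nlohmann/json.hpp>
using nlohmann::json;

void copy_init_sketch()
{
    json j_nonobject(json::value_t::boolean);
    const json j_copy(j_nonobject);               // flagged: the copy is avoidable
    const json j_direct(json::value_t::boolean);  // quiet: same value, constructed directly
    static_cast<void>(j_copy);
    static_cast<void>(j_direct);
}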
CHECK_THROWS_WITH(j_nonobject.value("foo", 1), @@ -262,7 +262,7 @@ TEST_CASE("element access 2") SECTION("number (unsigned)") { json j_nonobject(json::value_t::number_unsigned); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::number_unsigned); CHECK_THROWS_AS(j_nonobject.value("foo", 1), json::type_error&); CHECK_THROWS_AS(j_nonobject_const.value("foo", 1), json::type_error&); CHECK_THROWS_WITH(j_nonobject.value("foo", 1), @@ -274,7 +274,7 @@ TEST_CASE("element access 2") SECTION("number (floating-point)") { json j_nonobject(json::value_t::number_float); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::number_float); CHECK_THROWS_AS(j_nonobject.value("foo", 1), json::type_error&); CHECK_THROWS_AS(j_nonobject_const.value("foo", 1), json::type_error&); CHECK_THROWS_WITH(j_nonobject.value("foo", 1), @@ -320,7 +320,7 @@ TEST_CASE("element access 2") SECTION("null") { json j_nonobject(json::value_t::null); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::null); CHECK_THROWS_AS(j_nonobject.value("/foo"_json_pointer, 1), json::type_error&); CHECK_THROWS_AS(j_nonobject_const.value("/foo"_json_pointer, 1), json::type_error&); CHECK_THROWS_WITH(j_nonobject.value("/foo"_json_pointer, 1), @@ -332,7 +332,7 @@ TEST_CASE("element access 2") SECTION("boolean") { json j_nonobject(json::value_t::boolean); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::boolean); CHECK_THROWS_AS(j_nonobject.value("/foo"_json_pointer, 1), json::type_error&); CHECK_THROWS_AS(j_nonobject_const.value("/foo"_json_pointer, 1), json::type_error&); CHECK_THROWS_WITH(j_nonobject.value("/foo"_json_pointer, 1), @@ -344,7 +344,7 @@ TEST_CASE("element access 2") SECTION("string") { json j_nonobject(json::value_t::string); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::string); CHECK_THROWS_AS(j_nonobject.value("/foo"_json_pointer, 1), json::type_error&); CHECK_THROWS_AS(j_nonobject_const.value("/foo"_json_pointer, 1), json::type_error&); CHECK_THROWS_WITH(j_nonobject.value("/foo"_json_pointer, 1), @@ -356,7 +356,7 @@ TEST_CASE("element access 2") SECTION("array") { json j_nonobject(json::value_t::array); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::array); CHECK_THROWS_AS(j_nonobject.value("/foo"_json_pointer, 1), json::type_error&); CHECK_THROWS_AS(j_nonobject_const.value("/foo"_json_pointer, 1), json::type_error&); CHECK_THROWS_WITH(j_nonobject.value("/foo"_json_pointer, 1), @@ -368,7 +368,7 @@ TEST_CASE("element access 2") SECTION("number (integer)") { json j_nonobject(json::value_t::number_integer); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::number_integer); CHECK_THROWS_AS(j_nonobject.value("/foo"_json_pointer, 1), json::type_error&); CHECK_THROWS_AS(j_nonobject_const.value("/foo"_json_pointer, 1), json::type_error&); CHECK_THROWS_WITH(j_nonobject.value("/foo"_json_pointer, 1), @@ -380,7 +380,7 @@ TEST_CASE("element access 2") SECTION("number (unsigned)") { json j_nonobject(json::value_t::number_unsigned); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::number_unsigned); CHECK_THROWS_AS(j_nonobject.value("/foo"_json_pointer, 1), json::type_error&); CHECK_THROWS_AS(j_nonobject_const.value("/foo"_json_pointer, 1), json::type_error&); CHECK_THROWS_WITH(j_nonobject.value("/foo"_json_pointer, 
1), @@ -392,7 +392,7 @@ TEST_CASE("element access 2") SECTION("number (floating-point)") { json j_nonobject(json::value_t::number_float); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::number_float); CHECK_THROWS_AS(j_nonobject.value("/foo"_json_pointer, 1), json::type_error&); CHECK_THROWS_AS(j_nonobject_const.value("/foo"_json_pointer, 1), json::type_error&); CHECK_THROWS_WITH(j_nonobject.value("/foo"_json_pointer, 1), @@ -811,7 +811,7 @@ TEST_CASE("element access 2") { SECTION("existing element") { - for (auto key : + for (const auto *key : {"integer", "unsigned", "floating", "null", "string", "boolean", "object", "array" }) { @@ -900,7 +900,7 @@ TEST_CASE("element access 2") { SECTION("existing element") { - for (auto key : + for (const auto *key : {"integer", "unsigned", "floating", "null", "string", "boolean", "object", "array" }) { @@ -920,7 +920,7 @@ TEST_CASE("element access 2") SECTION("null") { json j_nonobject(json::value_t::null); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::null); CHECK(j_nonobject.count("foo") == 0); CHECK(j_nonobject_const.count("foo") == 0); } @@ -928,7 +928,7 @@ TEST_CASE("element access 2") SECTION("string") { json j_nonobject(json::value_t::string); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::string); CHECK(j_nonobject.count("foo") == 0); CHECK(j_nonobject_const.count("foo") == 0); } @@ -936,7 +936,7 @@ TEST_CASE("element access 2") SECTION("object") { json j_nonobject(json::value_t::object); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::object); CHECK(j_nonobject.count("foo") == 0); CHECK(j_nonobject_const.count("foo") == 0); } @@ -944,7 +944,7 @@ TEST_CASE("element access 2") SECTION("array") { json j_nonobject(json::value_t::array); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::array); CHECK(j_nonobject.count("foo") == 0); CHECK(j_nonobject_const.count("foo") == 0); } @@ -952,7 +952,7 @@ TEST_CASE("element access 2") SECTION("boolean") { json j_nonobject(json::value_t::boolean); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::boolean); CHECK(j_nonobject.count("foo") == 0); CHECK(j_nonobject_const.count("foo") == 0); } @@ -960,7 +960,7 @@ TEST_CASE("element access 2") SECTION("number (integer)") { json j_nonobject(json::value_t::number_integer); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::number_integer); CHECK(j_nonobject.count("foo") == 0); CHECK(j_nonobject_const.count("foo") == 0); } @@ -968,7 +968,7 @@ TEST_CASE("element access 2") SECTION("number (unsigned)") { json j_nonobject(json::value_t::number_unsigned); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::number_unsigned); CHECK(j_nonobject.count("foo") == 0); CHECK(j_nonobject_const.count("foo") == 0); } @@ -976,7 +976,7 @@ TEST_CASE("element access 2") SECTION("number (floating-point)") { json j_nonobject(json::value_t::number_float); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::number_float); CHECK(j_nonobject.count("foo") == 0); CHECK(j_nonobject_const.count("foo") == 0); } @@ -987,7 +987,7 @@ TEST_CASE("element access 2") { SECTION("existing element") { - for (auto key : + for (const auto *key : {"integer", "unsigned", "floating", "null", "string", "boolean", "object", "array" }) { @@ 
-1007,7 +1007,7 @@ TEST_CASE("element access 2") SECTION("null") { json j_nonobject(json::value_t::null); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::null); CHECK(j_nonobject.contains("foo") == false); CHECK(j_nonobject_const.contains("foo") == false); } @@ -1015,7 +1015,7 @@ TEST_CASE("element access 2") SECTION("string") { json j_nonobject(json::value_t::string); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::string); CHECK(j_nonobject.contains("foo") == false); CHECK(j_nonobject_const.contains("foo") == false); } @@ -1023,7 +1023,7 @@ TEST_CASE("element access 2") SECTION("object") { json j_nonobject(json::value_t::object); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::object); CHECK(j_nonobject.contains("foo") == false); CHECK(j_nonobject_const.contains("foo") == false); } @@ -1031,7 +1031,7 @@ TEST_CASE("element access 2") SECTION("array") { json j_nonobject(json::value_t::array); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::array); CHECK(j_nonobject.contains("foo") == false); CHECK(j_nonobject_const.contains("foo") == false); } @@ -1039,7 +1039,7 @@ TEST_CASE("element access 2") SECTION("boolean") { json j_nonobject(json::value_t::boolean); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::boolean); CHECK(j_nonobject.contains("foo") == false); CHECK(j_nonobject_const.contains("foo") == false); } @@ -1047,7 +1047,7 @@ TEST_CASE("element access 2") SECTION("number (integer)") { json j_nonobject(json::value_t::number_integer); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::number_integer); CHECK(j_nonobject.contains("foo") == false); CHECK(j_nonobject_const.contains("foo") == false); } @@ -1055,7 +1055,7 @@ TEST_CASE("element access 2") SECTION("number (unsigned)") { json j_nonobject(json::value_t::number_unsigned); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::number_unsigned); CHECK(j_nonobject.contains("foo") == false); CHECK(j_nonobject_const.contains("foo") == false); } @@ -1063,7 +1063,7 @@ TEST_CASE("element access 2") SECTION("number (floating-point)") { json j_nonobject(json::value_t::number_float); - const json j_nonobject_const(j_nonobject); + const json j_nonobject_const(json::value_t::number_float); CHECK(j_nonobject.contains("foo") == false); CHECK(j_nonobject_const.contains("foo") == false); } @@ -1078,7 +1078,7 @@ TEST_CASE("element access 2 (throwing tests)") SECTION("object") { json j = {{"integer", 1}, {"unsigned", 1u}, {"floating", 42.23}, {"null", nullptr}, {"string", "hello world"}, {"boolean", true}, {"object", json::object()}, {"array", {1, 2, 3}}}; - const json j_const = j; + const json j_const = {{"integer", 1}, {"unsigned", 1u}, {"floating", 42.23}, {"null", nullptr}, {"string", "hello world"}, {"boolean", true}, {"object", json::object()}, {"array", {1, 2, 3}}}; SECTION("access specified element with default value") { diff --git a/test/src/unit-regression1.cpp b/test/src/unit-regression1.cpp index d50fad10c6..4143243f5b 100644 --- a/test/src/unit-regression1.cpp +++ b/test/src/unit-regression1.cpp @@ -1109,7 +1109,7 @@ TEST_CASE("regression tests 1") CHECK_TYPE(0u) CHECK_TYPE(0L) CHECK_TYPE(0.0) - CHECK_TYPE("") + CHECK_TYPE("") // NOLINT(readability-container-size-empty) #undef CHECK_TYPE } @@ -1443,8 +1443,8 @@ TEST_CASE("regression tests 1") 
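// Aside: the issue #838 hunk below swaps a raw uint8_t array for std::array to
// satisfy the *-avoid-c-arrays checks. One subtlety, which PATCH 062 later
// addresses by adding a second brace level: std::array is an aggregate that
// wraps a built-in array, so fully-braced initialization takes double braces,
// even though brace elision usually lets the single-brace form compile.
// A minimal sketch with illustrative values (not the test's actual key bytes):
#include <array>
#include <cstdint>
#include <string>

void array_sketch()
{
    // outer braces initialize the std::array, inner braces its built-in storage
    std::array<std::uint8_t, 4> key = {{103, 92, 117, 0}};
    std::string s(reinterpret_cast<const char*>(key.data()));  // stops at the trailing 0
    static_cast<void>(s);
}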
SECTION("issue #838 - incorrect parse error with binary data in keys") { - uint8_t key1[] = { 103, 92, 117, 48, 48, 48, 55, 92, 114, 215, 126, 214, 95, 92, 34, 174, 40, 71, 38, 174, 40, 71, 38, 223, 134, 247, 127, 0 }; - std::string key1_str(reinterpret_cast(key1)); + std::array key1 = { 103, 92, 117, 48, 48, 48, 55, 92, 114, 215, 126, 214, 95, 92, 34, 174, 40, 71, 38, 174, 40, 71, 38, 223, 134, 247, 127, 0 }; + std::string key1_str(reinterpret_cast(key1.data())); json j = key1_str; CHECK_THROWS_AS(j.dump(), json::type_error&); CHECK_THROWS_WITH(j.dump(), "[json.exception.type_error.316] invalid UTF-8 byte at index 10: 0x7E"); diff --git a/test/src/unit-testsuites.cpp b/test/src/unit-testsuites.cpp index 88bb272d69..96239987d7 100644 --- a/test/src/unit-testsuites.cpp +++ b/test/src/unit-testsuites.cpp @@ -235,13 +235,9 @@ TEST_CASE("compliance tests from nativejson-benchmark") 5708990770823839524233143877797980545530986496.0); { - char n1e308[312]; // '1' followed by 308 '0' + std::string n1e308(312, '0'); // '1' followed by 308 '0' n1e308[0] = '['; n1e308[1] = '1'; - for (int j = 2; j < 310; j++) - { - n1e308[j] = '0'; - } n1e308[310] = ']'; n1e308[311] = '\0'; TEST_DOUBLE(n1e308, 1E308); From ae78c968cfb91e29e314211545458b3d118fece5 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 14:46:46 +0100 Subject: [PATCH 061/113] :art: fix format --- test/src/unit-element_access2.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/src/unit-element_access2.cpp b/test/src/unit-element_access2.cpp index af2c2cac7f..40b7ac9d6a 100644 --- a/test/src/unit-element_access2.cpp +++ b/test/src/unit-element_access2.cpp @@ -811,7 +811,7 @@ TEST_CASE("element access 2") { SECTION("existing element") { - for (const auto *key : + for (const auto* key : {"integer", "unsigned", "floating", "null", "string", "boolean", "object", "array" }) { @@ -900,7 +900,7 @@ TEST_CASE("element access 2") { SECTION("existing element") { - for (const auto *key : + for (const auto* key : {"integer", "unsigned", "floating", "null", "string", "boolean", "object", "array" }) { @@ -987,7 +987,7 @@ TEST_CASE("element access 2") { SECTION("existing element") { - for (const auto *key : + for (const auto* key : {"integer", "unsigned", "floating", "null", "string", "boolean", "object", "array" }) { From cdd6412ab24a7163c6097c1b721988e78726fb61 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 15:10:53 +0100 Subject: [PATCH 062/113] :rotating_light: fix warnings --- .clang-tidy | 1 + test/src/unit-regression1.cpp | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.clang-tidy b/.clang-tidy index 88e89f31e3..5ad3eea85a 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -20,6 +20,7 @@ Checks: '*, -hicpp-uppercase-literal-suffix, -llvm-header-guard, -llvm-include-order, + -misc-no-recursion, -misc-non-private-member-variables-in-classes, -modernize-use-trailing-return-type, -readability-function-size, diff --git a/test/src/unit-regression1.cpp b/test/src/unit-regression1.cpp index 4143243f5b..e7a5c910a0 100644 --- a/test/src/unit-regression1.cpp +++ b/test/src/unit-regression1.cpp @@ -1443,7 +1443,7 @@ TEST_CASE("regression tests 1") SECTION("issue #838 - incorrect parse error with binary data in keys") { - std::array key1 = { 103, 92, 117, 48, 48, 48, 55, 92, 114, 215, 126, 214, 95, 92, 34, 174, 40, 71, 38, 174, 40, 71, 38, 223, 134, 247, 127, 0 }; + std::array key1 = {{ 103, 92, 117, 48, 48, 48, 55, 92, 114, 215, 126, 214, 95, 92, 34, 174, 40, 71, 38, 174, 40, 71, 
38, 223, 134, 247, 127, 0 }}; std::string key1_str(reinterpret_cast(key1.data())); json j = key1_str; CHECK_THROWS_AS(j.dump(), json::type_error&); From 09bf771fd498bd81fdb577da7aa76a92ff6437d1 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 15:44:04 +0100 Subject: [PATCH 063/113] :rotating_light: fix warnings --- .clang-tidy | 2 ++ include/nlohmann/detail/iterators/iteration_proxy.hpp | 2 +- test/src/unit-udt.cpp | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index 5ad3eea85a..93deac3cb7 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -31,3 +31,5 @@ Checks: '*, CheckOptions: - key: hicpp-special-member-functions.AllowSoleDefaultDtor value: 1 + +HeaderFilterRegex: '.*nlohmann.*' diff --git a/include/nlohmann/detail/iterators/iteration_proxy.hpp b/include/nlohmann/detail/iterators/iteration_proxy.hpp index 3e181d5d92..1b47faeb3e 100644 --- a/include/nlohmann/detail/iterators/iteration_proxy.hpp +++ b/include/nlohmann/detail/iterators/iteration_proxy.hpp @@ -39,7 +39,7 @@ template class iteration_proxy_value /// a string representation of the array index mutable string_type array_index_str = "0"; /// an empty string (to return a reference for primitive values) - const string_type empty_str; + const string_type empty_str{}; public: explicit iteration_proxy_value(IteratorType it) noexcept : anchor(it) {} diff --git a/test/src/unit-udt.cpp b/test/src/unit-udt.cpp index 623d4bb2c1..96398dbea1 100644 --- a/test/src/unit-udt.cpp +++ b/test/src/unit-udt.cpp @@ -618,8 +618,8 @@ struct small_pod struct non_pod { - std::string s; - non_pod() : s() {} + std::string s{}; + non_pod() = default; non_pod(std::string S) : s(std::move(S)) {} }; From bfcbb43b7e486e02b740fa4f1f77e4e514e6f0fc Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 15:48:53 +0100 Subject: [PATCH 064/113] :rotating_light: fix warnings --- cmake/ci.cmake | 2 +- single_include/nlohmann/json.hpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 01ade85626..7d83aa3d61 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -359,7 +359,7 @@ set(GCC_CXXFLAGS "-std=c++11 \ add_custom_target(ci_test_gcc COMMAND CXX=${GCC_TOOL} CXXFLAGS=${GCC_CXXFLAGS} ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -GNinja - -DJSON_BuildTests=ON + -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_gcc COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_gcc COMMAND cd ${PROJECT_BINARY_DIR}/build_gcc && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index 9a04768ca8..8b3ede008d 100644 --- a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -3940,7 +3940,7 @@ template class iteration_proxy_value /// a string representation of the array index mutable string_type array_index_str = "0"; /// an empty string (to return a reference for primitive values) - const string_type empty_str; + const string_type empty_str{}; public: explicit iteration_proxy_value(IteratorType it) noexcept : anchor(it) {} From bdcabc05d1827cd6a8135572b237832651dd60eb Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 16:02:48 +0100 Subject: [PATCH 065/113] :rotating_light: fix warnings --- .clang-tidy | 1 + include/nlohmann/detail/conversions/from_json.hpp | 2 +- include/nlohmann/detail/json_ref.hpp | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git 
a/.clang-tidy b/.clang-tidy index 93deac3cb7..6bcd36d6e3 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -4,6 +4,7 @@ Checks: '*, -cppcoreguidelines-avoid-magic-numbers, -cppcoreguidelines-avoid-non-const-global-variables, -cppcoreguidelines-macro-usage, + -cppcoreguidelines-pro-bounds-array-to-pointer-decay, -cppcoreguidelines-pro-type-union-access, -fuchsia-default-arguments-calls, -fuchsia-default-arguments-declarations, diff --git a/include/nlohmann/detail/conversions/from_json.hpp b/include/nlohmann/detail/conversions/from_json.hpp index 438b84a2e1..7803d4b24e 100644 --- a/include/nlohmann/detail/conversions/from_json.hpp +++ b/include/nlohmann/detail/conversions/from_json.hpp @@ -161,7 +161,7 @@ void from_json(const BasicJsonType& j, std::valarray& l) } template -auto from_json(const BasicJsonType& j, T (&arr)[N]) +auto from_json(const BasicJsonType& j, T (&arr)[N]) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) -> decltype(j.template get(), void()) { for (std::size_t i = 0; i < N; ++i) diff --git a/include/nlohmann/detail/json_ref.hpp b/include/nlohmann/detail/json_ref.hpp index 26a4903828..b4e5dabaf7 100644 --- a/include/nlohmann/detail/json_ref.hpp +++ b/include/nlohmann/detail/json_ref.hpp @@ -35,7 +35,7 @@ class json_ref {} // class should be movable only - json_ref(json_ref&&) = default; + json_ref(json_ref&&) noexcept = default; json_ref(const json_ref&) = delete; json_ref& operator=(const json_ref&) = delete; json_ref& operator=(json_ref&&) = delete; From 0dcb01bc47394c1b8fc0a136d47a834f1f138959 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 16:38:11 +0100 Subject: [PATCH 066/113] :rotating_light: fix warnings --- .clang-tidy | 2 + cmake/ci.cmake | 2 +- include/nlohmann/detail/exceptions.hpp | 2 +- .../nlohmann/detail/input/binary_reader.hpp | 6 +- include/nlohmann/detail/input/json_sax.hpp | 8 +-- include/nlohmann/detail/input/lexer.hpp | 4 +- include/nlohmann/detail/output/serializer.hpp | 12 ++-- include/nlohmann/json.hpp | 24 ++++---- single_include/nlohmann/json.hpp | 60 +++++++++---------- 9 files changed, 61 insertions(+), 59 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index 6bcd36d6e3..63e2902eff 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -5,6 +5,7 @@ Checks: '*, -cppcoreguidelines-avoid-non-const-global-variables, -cppcoreguidelines-macro-usage, -cppcoreguidelines-pro-bounds-array-to-pointer-decay, + -cppcoreguidelines-pro-type-reinterpret-cast, -cppcoreguidelines-pro-type-union-access, -fuchsia-default-arguments-calls, -fuchsia-default-arguments-declarations, @@ -26,6 +27,7 @@ Checks: '*, -modernize-use-trailing-return-type, -readability-function-size, -readability-magic-numbers, + -readability-redundant-access-specifiers, -readability-uppercase-literal-suffix, -llvmlibc-*' diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 7d83aa3d61..433f4f1f72 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -463,7 +463,7 @@ add_custom_target(ci_cppcheck ############################################################################### add_custom_target(ci_cpplint - COMMAND ${Python3_EXECUTABLE} ${CMAKE_SOURCE_DIR}/third_party/cpplint/cpplint.py --filter=-whitespace,-legal,-runtime/references,-runtime/explicit,-runtime/indentation_namespace,-readability/casting --quiet --recursive ${SRC_FILES} + COMMAND ${Python3_EXECUTABLE} ${CMAKE_SOURCE_DIR}/third_party/cpplint/cpplint.py 
--filter=-whitespace,-legal,-runtime/references,-runtime/explicit,-runtime/indentation_namespace,-readability/casting,-readability/nolint --quiet --recursive ${SRC_FILES} COMMENT "Check code with cpplint" ) diff --git a/include/nlohmann/detail/exceptions.hpp b/include/nlohmann/detail/exceptions.hpp index dd92897d5a..d310d795c9 100644 --- a/include/nlohmann/detail/exceptions.hpp +++ b/include/nlohmann/detail/exceptions.hpp @@ -54,7 +54,7 @@ class exception : public std::exception } /// the id of the exception - const int id; + const int id; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes) protected: JSON_HEDLEY_NON_NULL(3) diff --git a/include/nlohmann/detail/input/binary_reader.hpp b/include/nlohmann/detail/input/binary_reader.hpp index bfe18a0fbc..a245e88fdd 100644 --- a/include/nlohmann/detail/input/binary_reader.hpp +++ b/include/nlohmann/detail/input/binary_reader.hpp @@ -78,9 +78,9 @@ class binary_reader // make class move-only binary_reader(const binary_reader&) = delete; - binary_reader(binary_reader&&) = default; + binary_reader(binary_reader&&) noexcept = default; binary_reader& operator=(const binary_reader&) = delete; - binary_reader& operator=(binary_reader&&) = default; + binary_reader& operator=(binary_reader&&) noexcept = default; ~binary_reader() = default; /*! @@ -315,7 +315,7 @@ class binary_reader default: // anything else not supported (yet) { std::array cr{{}}; - (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast(element_type)); + (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast(element_type)); // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-vararg) return sax->parse_error(element_type_parse_position, std::string(cr.data()), parse_error::create(114, element_type_parse_position, "Unsupported BSON record type 0x" + std::string(cr.data()))); } } diff --git a/include/nlohmann/detail/input/json_sax.hpp b/include/nlohmann/detail/input/json_sax.hpp index c41493d6d2..a7ad2abcac 100644 --- a/include/nlohmann/detail/input/json_sax.hpp +++ b/include/nlohmann/detail/input/json_sax.hpp @@ -166,9 +166,9 @@ class json_sax_dom_parser // make class move-only json_sax_dom_parser(const json_sax_dom_parser&) = delete; - json_sax_dom_parser(json_sax_dom_parser&&) = default; + json_sax_dom_parser(json_sax_dom_parser&&) noexcept = default; json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete; - json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; + json_sax_dom_parser& operator=(json_sax_dom_parser&&) noexcept = default; ~json_sax_dom_parser() = default; bool null() @@ -341,9 +341,9 @@ class json_sax_dom_callback_parser // make class move-only json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; + json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) noexcept = default; json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; + json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) noexcept = default; ~json_sax_dom_callback_parser() = default; bool null() diff --git a/include/nlohmann/detail/input/lexer.hpp b/include/nlohmann/detail/input/lexer.hpp index eae82eaa32..c2141ed263 100644 --- a/include/nlohmann/detail/input/lexer.hpp +++ b/include/nlohmann/detail/input/lexer.hpp @@ -120,9 +120,9 @@ class lexer : public lexer_base // delete because of pointer members lexer(const lexer&) = delete; - 
lexer(lexer&&) = default; + lexer(lexer&&) noexcept = default; lexer& operator=(lexer&) = delete; - lexer& operator=(lexer&&) = default; + lexer& operator=(lexer&&) noexcept = default; ~lexer() = default; private: diff --git a/include/nlohmann/detail/output/serializer.hpp b/include/nlohmann/detail/output/serializer.hpp index 0a34c8011e..ab155ecb6c 100644 --- a/include/nlohmann/detail/output/serializer.hpp +++ b/include/nlohmann/detail/output/serializer.hpp @@ -379,7 +379,7 @@ class serializer */ void dump_escaped(const string_t& s, const bool ensure_ascii) { - std::uint32_t codepoint; + std::uint32_t codepoint{}; std::uint8_t state = UTF8_ACCEPT; std::size_t bytes = 0; // number of bytes written to string_buffer @@ -700,12 +700,12 @@ class serializer } // use a pointer to fill the buffer - auto buffer_ptr = number_buffer.begin(); + auto* buffer_ptr = number_buffer.begin(); const bool is_negative = std::is_same::value && !(x >= 0); // see issue #755 number_unsigned_t abs_value; - unsigned int n_chars; + unsigned int n_chars{}; if (is_negative) { @@ -805,8 +805,8 @@ class serializer // erase thousands separator if (thousands_sep != '\0') { - const auto end = std::remove(number_buffer.begin(), - number_buffer.begin() + len, thousands_sep); + auto* const end = std::remove(number_buffer.begin(), + number_buffer.begin() + len, thousands_sep); std::fill(end, number_buffer.end(), '\0'); JSON_ASSERT((end - number_buffer.begin()) <= len); len = (end - number_buffer.begin()); @@ -815,7 +815,7 @@ class serializer // convert decimal point to '.' if (decimal_point != '\0' && decimal_point != '.') { - const auto dec_pos = std::find(number_buffer.begin(), number_buffer.end(), decimal_point); + auto* const dec_pos = std::find(number_buffer.begin(), number_buffer.end(), decimal_point); if (dec_pos != number_buffer.end()) { *dec_pos = '.'; diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp index bf40e95dbc..e5304a7799 100644 --- a/include/nlohmann/json.hpp +++ b/include/nlohmann/json.hpp @@ -6201,7 +6201,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator==(const_reference lhs, const ScalarType rhs) noexcept + friend bool operator==(const_reference lhs, ScalarType rhs) noexcept { return lhs == basic_json(rhs); } @@ -6212,7 +6212,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator==(const ScalarType lhs, const_reference rhs) noexcept + friend bool operator==(ScalarType lhs, const_reference rhs) noexcept { return basic_json(lhs) == rhs; } @@ -6246,7 +6246,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator!=(const_reference lhs, const ScalarType rhs) noexcept + friend bool operator!=(const_reference lhs, ScalarType rhs) noexcept { return lhs != basic_json(rhs); } @@ -6257,7 +6257,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator!=(const ScalarType lhs, const_reference rhs) noexcept + friend bool operator!=(ScalarType lhs, const_reference rhs) noexcept { return basic_json(lhs) != rhs; } @@ -6367,7 +6367,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator<(const_reference lhs, const ScalarType rhs) noexcept + friend bool operator<(const_reference lhs, ScalarType rhs) noexcept { return lhs < basic_json(rhs); } @@ -6378,7 +6378,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator<(const ScalarType lhs, const_reference rhs) noexcept + friend bool operator<(ScalarType lhs, const_reference rhs) noexcept { return basic_json(lhs) < 
rhs; } @@ -6413,7 +6413,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator<=(const_reference lhs, const ScalarType rhs) noexcept + friend bool operator<=(const_reference lhs, ScalarType rhs) noexcept { return lhs <= basic_json(rhs); } @@ -6424,7 +6424,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator<=(const ScalarType lhs, const_reference rhs) noexcept + friend bool operator<=(ScalarType lhs, const_reference rhs) noexcept { return basic_json(lhs) <= rhs; } @@ -6459,7 +6459,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator>(const_reference lhs, const ScalarType rhs) noexcept + friend bool operator>(const_reference lhs, ScalarType rhs) noexcept { return lhs > basic_json(rhs); } @@ -6470,7 +6470,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator>(const ScalarType lhs, const_reference rhs) noexcept + friend bool operator>(ScalarType lhs, const_reference rhs) noexcept { return basic_json(lhs) > rhs; } @@ -6505,7 +6505,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator>=(const_reference lhs, const ScalarType rhs) noexcept + friend bool operator>=(const_reference lhs, ScalarType rhs) noexcept { return lhs >= basic_json(rhs); } @@ -6516,7 +6516,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator>=(const ScalarType lhs, const_reference rhs) noexcept + friend bool operator>=(ScalarType lhs, const_reference rhs) noexcept { return basic_json(lhs) >= rhs; } diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index 8b3ede008d..e82501475f 100644 --- a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -2423,7 +2423,7 @@ class exception : public std::exception } /// the id of the exception - const int id; + const int id; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes) protected: JSON_HEDLEY_NON_NULL(3) @@ -3643,7 +3643,7 @@ void from_json(const BasicJsonType& j, std::valarray& l) } template -auto from_json(const BasicJsonType& j, T (&arr)[N]) +auto from_json(const BasicJsonType& j, T (&arr)[N]) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) -> decltype(j.template get(), void()) { for (std::size_t i = 0; i < N; ++i) @@ -5453,9 +5453,9 @@ class json_sax_dom_parser // make class move-only json_sax_dom_parser(const json_sax_dom_parser&) = delete; - json_sax_dom_parser(json_sax_dom_parser&&) = default; + json_sax_dom_parser(json_sax_dom_parser&&) noexcept = default; json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete; - json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; + json_sax_dom_parser& operator=(json_sax_dom_parser&&) noexcept = default; ~json_sax_dom_parser() = default; bool null() @@ -5628,9 +5628,9 @@ class json_sax_dom_callback_parser // make class move-only json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; + json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) noexcept = default; json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; + json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) noexcept = default; ~json_sax_dom_callback_parser() = default; bool null() @@ -6107,9 +6107,9 @@ class lexer : public lexer_base // delete because of 
pointer members lexer(const lexer&) = delete; - lexer(lexer&&) = default; + lexer(lexer&&) noexcept = default; lexer& operator=(lexer&) = delete; - lexer& operator=(lexer&&) = default; + lexer& operator=(lexer&&) noexcept = default; ~lexer() = default; private: @@ -7824,9 +7824,9 @@ class binary_reader // make class move-only binary_reader(const binary_reader&) = delete; - binary_reader(binary_reader&&) = default; + binary_reader(binary_reader&&) noexcept = default; binary_reader& operator=(const binary_reader&) = delete; - binary_reader& operator=(binary_reader&&) = default; + binary_reader& operator=(binary_reader&&) noexcept = default; ~binary_reader() = default; /*! @@ -8061,7 +8061,7 @@ class binary_reader default: // anything else not supported (yet) { std::array cr{{}}; - (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast(element_type)); + (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast(element_type)); // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-vararg) return sax->parse_error(element_type_parse_position, std::string(cr.data()), parse_error::create(114, element_type_parse_position, "Unsupported BSON record type 0x" + std::string(cr.data()))); } } @@ -12671,7 +12671,7 @@ class json_ref {} // class should be movable only - json_ref(json_ref&&) = default; + json_ref(json_ref&&) noexcept = default; json_ref(const json_ref&) = delete; json_ref& operator=(const json_ref&) = delete; json_ref& operator=(json_ref&&) = delete; @@ -15927,7 +15927,7 @@ class serializer */ void dump_escaped(const string_t& s, const bool ensure_ascii) { - std::uint32_t codepoint; + std::uint32_t codepoint{}; std::uint8_t state = UTF8_ACCEPT; std::size_t bytes = 0; // number of bytes written to string_buffer @@ -16248,12 +16248,12 @@ class serializer } // use a pointer to fill the buffer - auto buffer_ptr = number_buffer.begin(); + auto* buffer_ptr = number_buffer.begin(); const bool is_negative = std::is_same::value && !(x >= 0); // see issue #755 number_unsigned_t abs_value; - unsigned int n_chars; + unsigned int n_chars{}; if (is_negative) { @@ -16353,8 +16353,8 @@ class serializer // erase thousands separator if (thousands_sep != '\0') { - const auto end = std::remove(number_buffer.begin(), - number_buffer.begin() + len, thousands_sep); + auto* const end = std::remove(number_buffer.begin(), + number_buffer.begin() + len, thousands_sep); std::fill(end, number_buffer.end(), '\0'); JSON_ASSERT((end - number_buffer.begin()) <= len); len = (end - number_buffer.begin()); @@ -16363,7 +16363,7 @@ class serializer // convert decimal point to '.' 
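// Aside: the serializer hunks here sit in the float-printing path, which
// formats through snprintf-style calls that honor the current C locale, so the
// buffer may contain a locale-specific thousands separator and decimal point.
// The code strips the former and rewrites the latter to '.' to keep the output
// valid JSON. A minimal standalone sketch of that normalization, using an
// illustrative helper rather than the library's actual function:
#include <algorithm>
#include <clocale>
#include <string>

std::string normalize_number(std::string buf)
{
    const std::lconv* loc = std::localeconv();
    const char thousands_sep = *loc->thousands_sep;  // '\0' when the locale has none
    const char decimal_point = *loc->decimal_point;  // e.g. ',' in a German locale

    if (thousands_sep != '\0')
    {
        buf.erase(std::remove(buf.begin(), buf.end(), thousands_sep), buf.end());
    }
    if (decimal_point != '\0' && decimal_point != '.')
    {
        std::replace(buf.begin(), buf.end(), decimal_point, '.');
    }
    return buf;
}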
if (decimal_point != '\0' && decimal_point != '.') { - const auto dec_pos = std::find(number_buffer.begin(), number_buffer.end(), decimal_point); + auto* const dec_pos = std::find(number_buffer.begin(), number_buffer.end(), decimal_point); if (dec_pos != number_buffer.end()) { *dec_pos = '.'; @@ -22822,7 +22822,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator==(const_reference lhs, const ScalarType rhs) noexcept + friend bool operator==(const_reference lhs, ScalarType rhs) noexcept { return lhs == basic_json(rhs); } @@ -22833,7 +22833,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator==(const ScalarType lhs, const_reference rhs) noexcept + friend bool operator==(ScalarType lhs, const_reference rhs) noexcept { return basic_json(lhs) == rhs; } @@ -22867,7 +22867,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator!=(const_reference lhs, const ScalarType rhs) noexcept + friend bool operator!=(const_reference lhs, ScalarType rhs) noexcept { return lhs != basic_json(rhs); } @@ -22878,7 +22878,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator!=(const ScalarType lhs, const_reference rhs) noexcept + friend bool operator!=(ScalarType lhs, const_reference rhs) noexcept { return basic_json(lhs) != rhs; } @@ -22988,7 +22988,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator<(const_reference lhs, const ScalarType rhs) noexcept + friend bool operator<(const_reference lhs, ScalarType rhs) noexcept { return lhs < basic_json(rhs); } @@ -22999,7 +22999,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator<(const ScalarType lhs, const_reference rhs) noexcept + friend bool operator<(ScalarType lhs, const_reference rhs) noexcept { return basic_json(lhs) < rhs; } @@ -23034,7 +23034,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator<=(const_reference lhs, const ScalarType rhs) noexcept + friend bool operator<=(const_reference lhs, ScalarType rhs) noexcept { return lhs <= basic_json(rhs); } @@ -23045,7 +23045,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator<=(const ScalarType lhs, const_reference rhs) noexcept + friend bool operator<=(ScalarType lhs, const_reference rhs) noexcept { return basic_json(lhs) <= rhs; } @@ -23080,7 +23080,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator>(const_reference lhs, const ScalarType rhs) noexcept + friend bool operator>(const_reference lhs, ScalarType rhs) noexcept { return lhs > basic_json(rhs); } @@ -23091,7 +23091,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator>(const ScalarType lhs, const_reference rhs) noexcept + friend bool operator>(ScalarType lhs, const_reference rhs) noexcept { return basic_json(lhs) > rhs; } @@ -23126,7 +23126,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator>=(const_reference lhs, const ScalarType rhs) noexcept + friend bool operator>=(const_reference lhs, ScalarType rhs) noexcept { return lhs >= basic_json(rhs); } @@ -23137,7 +23137,7 @@ class basic_json */ template::value, int>::type = 0> - friend bool operator>=(const ScalarType lhs, const_reference rhs) noexcept + friend bool operator>=(ScalarType lhs, const_reference rhs) noexcept { return basic_json(lhs) >= rhs; } From bfd4c7c739dad6c46bc38e6bbcc62150842d91b9 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 17:10:52 +0100 Subject: [PATCH 
067/113] :rotating_light: fix warnings --- .clang-tidy | 1 + .../nlohmann/detail/conversions/to_chars.hpp | 2 +- include/nlohmann/detail/json_pointer.hpp | 2 +- include/nlohmann/detail/output/serializer.hpp | 2 +- include/nlohmann/json.hpp | 2 +- single_include/nlohmann/json.hpp | 8 ++-- test/src/unit-bson.cpp | 2 +- test/src/unit-cbor.cpp | 2 +- test/src/unit-class_parser.cpp | 2 +- test/src/unit-constructor1.cpp | 48 +++++++++++++------ test/src/unit-conversions.cpp | 8 ++-- test/src/unit-msgpack.cpp | 2 +- test/src/unit-regression2.cpp | 4 +- test/src/unit-to_chars.cpp | 6 +-- test/src/unit-ubjson.cpp | 2 +- 15 files changed, 57 insertions(+), 36 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index 63e2902eff..cdbba1fca1 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -5,6 +5,7 @@ Checks: '*, -cppcoreguidelines-avoid-non-const-global-variables, -cppcoreguidelines-macro-usage, -cppcoreguidelines-pro-bounds-array-to-pointer-decay, + -cppcoreguidelines-pro-bounds-pointer-arithmetic, -cppcoreguidelines-pro-type-reinterpret-cast, -cppcoreguidelines-pro-type-union-access, -fuchsia-default-arguments-calls, diff --git a/include/nlohmann/detail/conversions/to_chars.hpp b/include/nlohmann/detail/conversions/to_chars.hpp index 49ed0f913e..5b098eb8d2 100644 --- a/include/nlohmann/detail/conversions/to_chars.hpp +++ b/include/nlohmann/detail/conversions/to_chars.hpp @@ -618,7 +618,7 @@ inline void grisu2_digit_gen(char* buffer, int& length, int& decimal_exponent, JSON_ASSERT(p1 > 0); - std::uint32_t pow10; + std::uint32_t pow10{}; const int k = find_largest_pow10(p1, pow10); // 10^(k-1) <= p1 < 10^k, pow10 = 10^(k-1) diff --git a/include/nlohmann/detail/json_pointer.hpp b/include/nlohmann/detail/json_pointer.hpp index 4209e676a6..b89a77a6d4 100644 --- a/include/nlohmann/detail/json_pointer.hpp +++ b/include/nlohmann/detail/json_pointer.hpp @@ -180,7 +180,7 @@ class json_pointer @since version 3.6.0 */ - friend json_pointer operator/(const json_pointer& ptr, std::string token) + friend json_pointer operator/(const json_pointer& ptr, std::string token) // NOLINT(performance-unnecessary-value-param) { return json_pointer(ptr) /= std::move(token); } diff --git a/include/nlohmann/detail/output/serializer.hpp b/include/nlohmann/detail/output/serializer.hpp index ab155ecb6c..54250e38e9 100644 --- a/include/nlohmann/detail/output/serializer.hpp +++ b/include/nlohmann/detail/output/serializer.hpp @@ -700,7 +700,7 @@ class serializer } // use a pointer to fill the buffer - auto* buffer_ptr = number_buffer.begin(); + auto buffer_ptr = number_buffer.begin(); const bool is_negative = std::is_same::value && !(x >= 0); // see issue #755 number_unsigned_t abs_value; diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp index e5304a7799..2eb288eccc 100644 --- a/include/nlohmann/json.hpp +++ b/include/nlohmann/json.hpp @@ -2821,7 +2821,7 @@ class basic_json static ReferenceType get_ref_impl(ThisType& obj) { // delegate the call to get_ptr<>() - auto ptr = obj.template get_ptr::type>(); + auto* ptr = obj.template get_ptr::type>(); if (JSON_HEDLEY_LIKELY(ptr != nullptr)) { diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index e82501475f..8d6bcb1b1e 100644 --- a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -11834,7 +11834,7 @@ class json_pointer @since version 3.6.0 */ - friend json_pointer operator/(const json_pointer& ptr, std::string token) + friend json_pointer operator/(const json_pointer& ptr, std::string token) // 
NOLINT(performance-unnecessary-value-param) { return json_pointer(ptr) /= std::move(token); } @@ -15071,7 +15071,7 @@ inline void grisu2_digit_gen(char* buffer, int& length, int& decimal_exponent, JSON_ASSERT(p1 > 0); - std::uint32_t pow10; + std::uint32_t pow10{}; const int k = find_largest_pow10(p1, pow10); // 10^(k-1) <= p1 < 10^k, pow10 = 10^(k-1) @@ -16248,7 +16248,7 @@ class serializer } // use a pointer to fill the buffer - auto* buffer_ptr = number_buffer.begin(); + auto buffer_ptr = number_buffer.begin(); const bool is_negative = std::is_same::value && !(x >= 0); // see issue #755 number_unsigned_t abs_value; @@ -19442,7 +19442,7 @@ class basic_json static ReferenceType get_ref_impl(ThisType& obj) { // delegate the call to get_ptr<>() - auto ptr = obj.template get_ptr::type>(); + auto* ptr = obj.template get_ptr::type>(); if (JSON_HEDLEY_LIKELY(ptr != nullptr)) { diff --git a/test/src/unit-bson.cpp b/test/src/unit-bson.cpp index e0e020de0a..b89bddee63 100644 --- a/test/src/unit-bson.cpp +++ b/test/src/unit-bson.cpp @@ -738,7 +738,7 @@ class SaxCountdown return events_left-- > 0; } - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const json::exception& /*unused*/) + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const json::exception& /*unused*/) // NOLINT(readability-convert-member-functions-to-static) { return false; } diff --git a/test/src/unit-cbor.cpp b/test/src/unit-cbor.cpp index bfbcf54044..00aafba65b 100644 --- a/test/src/unit-cbor.cpp +++ b/test/src/unit-cbor.cpp @@ -109,7 +109,7 @@ class SaxCountdown return events_left-- > 0; } - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const json::exception& /*unused*/) + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const json::exception& /*unused*/) // NOLINT(readability-convert-member-functions-to-static) { return false; } diff --git a/test/src/unit-class_parser.cpp b/test/src/unit-class_parser.cpp index 6701f5f4e9..247d9d5fe5 100644 --- a/test/src/unit-class_parser.cpp +++ b/test/src/unit-class_parser.cpp @@ -1691,7 +1691,7 @@ TEST_CASE("parser class") SECTION("from array") { - uint8_t v[] = {'t', 'r', 'u', 'e'}; + uint8_t v[] = {'t', 'r', 'u', 'e'}; // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) json j; json::parser(nlohmann::detail::input_adapter(std::begin(v), std::end(v))).parse(true, j); CHECK(j == json(true)); diff --git a/test/src/unit-constructor1.cpp b/test/src/unit-constructor1.cpp index 6838dd0e52..ba9bf12b73 100644 --- a/test/src/unit-constructor1.cpp +++ b/test/src/unit-constructor1.cpp @@ -436,7 +436,7 @@ TEST_CASE("constructors") SECTION("char[]") { - char s[] {"Hello world"}; + char s[] {"Hello world"}; // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) json j(s); CHECK(j.type() == json::value_t::string); CHECK(j == j_reference); @@ -1115,24 +1115,29 @@ TEST_CASE("constructors") { SECTION("string") { - // This should break through any short string optimization in std::string - std::string source(1024, '!'); - const char* source_addr = source.data(); - SECTION("constructor with implicit types (array)") { + // This should break through any short string optimization in std::string + std::string source(1024, '!'); + const char* source_addr = source.data(); json j = {std::move(source)}; CHECK(j[0].get_ref().data() == source_addr); } SECTION("constructor with implicit types (object)") { + // This should break through any short string 
optimization in std::string + std::string source(1024, '!'); + const char* source_addr = source.data(); json j = {{"key", std::move(source)}}; CHECK(j["key"].get_ref().data() == source_addr); } SECTION("constructor with implicit types (object key)") { + // This should break through any short string optimization in std::string + std::string source(1024, '!'); + const char* source_addr = source.data(); json j = {{std::move(source), 42}}; CHECK(j.get_ref().begin()->first.data() == source_addr); } @@ -1140,29 +1145,34 @@ TEST_CASE("constructors") SECTION("array") { - json::array_t source = {1, 2, 3}; - const json* source_addr = source.data(); - SECTION("constructor with implicit types (array)") { + json::array_t source = {1, 2, 3}; + const json* source_addr = source.data(); json j {std::move(source)}; CHECK(j[0].get_ref().data() == source_addr); } SECTION("constructor with implicit types (object)") { + json::array_t source = {1, 2, 3}; + const json* source_addr = source.data(); json j {{"key", std::move(source)}}; CHECK(j["key"].get_ref().data() == source_addr); } SECTION("assignment with implicit types (array)") { + json::array_t source = {1, 2, 3}; + const json* source_addr = source.data(); json j = {std::move(source)}; CHECK(j[0].get_ref().data() == source_addr); } SECTION("assignment with implicit types (object)") { + json::array_t source = {1, 2, 3}; + const json* source_addr = source.data(); json j = {{"key", std::move(source)}}; CHECK(j["key"].get_ref().data() == source_addr); } @@ -1170,29 +1180,34 @@ TEST_CASE("constructors") SECTION("object") { - json::object_t source = {{"hello", "world"}}; - const json* source_addr = &source.at("hello"); - SECTION("constructor with implicit types (array)") { + json::object_t source = {{"hello", "world"}}; + const json* source_addr = &source.at("hello"); json j {std::move(source)}; CHECK(&(j[0].get_ref().at("hello")) == source_addr); } SECTION("constructor with implicit types (object)") { + json::object_t source = {{"hello", "world"}}; + const json* source_addr = &source.at("hello"); json j {{"key", std::move(source)}}; CHECK(&(j["key"].get_ref().at("hello")) == source_addr); } SECTION("assignment with implicit types (array)") { + json::object_t source = {{"hello", "world"}}; + const json* source_addr = &source.at("hello"); json j = {std::move(source)}; CHECK(&(j[0].get_ref().at("hello")) == source_addr); } SECTION("assignment with implicit types (object)") { + json::object_t source = {{"hello", "world"}}; + const json* source_addr = &source.at("hello"); json j = {{"key", std::move(source)}}; CHECK(&(j["key"].get_ref().at("hello")) == source_addr); } @@ -1200,29 +1215,34 @@ TEST_CASE("constructors") SECTION("json") { - json source {1, 2, 3}; - const json* source_addr = &source[0]; - SECTION("constructor with implicit types (array)") { + json source {1, 2, 3}; + const json* source_addr = &source[0]; json j {std::move(source), {}}; CHECK(&j[0][0] == source_addr); } SECTION("constructor with implicit types (object)") { + json source {1, 2, 3}; + const json* source_addr = &source[0]; json j {{"key", std::move(source)}}; CHECK(&j["key"][0] == source_addr); } SECTION("assignment with implicit types (array)") { + json source {1, 2, 3}; + const json* source_addr = &source[0]; json j = {std::move(source), {}}; CHECK(&j[0][0] == source_addr); } SECTION("assignment with implicit types (object)") { + json source {1, 2, 3}; + const json* source_addr = &source[0]; json j = {{"key", std::move(source)}}; CHECK(&j["key"][0] == source_addr); } diff --git 
a/test/src/unit-conversions.cpp b/test/src/unit-conversions.cpp index cc6c7d0787..6117226e29 100644 --- a/test/src/unit-conversions.cpp +++ b/test/src/unit-conversions.cpp @@ -282,8 +282,8 @@ TEST_CASE("value conversion") SECTION("built-in arrays") { - const char str[] = "a string"; - const int nbs[] = {0, 1, 2}; + const char str[] = "a string"; // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) + const int nbs[] = {0, 1, 2}; // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) json j2 = nbs; json j3 = str; @@ -387,8 +387,8 @@ TEST_CASE("value conversion") SECTION("built-in arrays") { - const int nbs[] = {0, 1, 2}; - int nbs2[] = {0, 0, 0}; + const int nbs[] = {0, 1, 2}; // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) + int nbs2[] = {0, 0, 0}; // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) json j2 = nbs; j2.get_to(nbs2); diff --git a/test/src/unit-msgpack.cpp b/test/src/unit-msgpack.cpp index 31a22fedcf..dde28c4b7b 100644 --- a/test/src/unit-msgpack.cpp +++ b/test/src/unit-msgpack.cpp @@ -107,7 +107,7 @@ class SaxCountdown return events_left-- > 0; } - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const json::exception& /*unused*/) + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const json::exception& /*unused*/) // NOLINT(readability-convert-member-functions-to-static) { return false; } diff --git a/test/src/unit-regression2.cpp b/test/src/unit-regression2.cpp index 6dd6e53d5a..210bc6787a 100644 --- a/test/src/unit-regression2.cpp +++ b/test/src/unit-regression2.cpp @@ -318,7 +318,7 @@ TEST_CASE("regression tests 2") SECTION("test case in issue #1445") { nlohmann::json dump_test; - const int data[] = + const std::array data = { 109, 108, 103, 125, -122, -53, 115, 18, 3, 0, 102, 19, 1, 15, @@ -395,7 +395,7 @@ TEST_CASE("regression tests 2") SECTION("string array") { - const char input[] = { 'B', 0x00 }; + const std::array input = { 'B', 0x00 }; json cbor = json::from_cbor(input, true, false); CHECK(cbor.is_discarded()); } diff --git a/test/src/unit-to_chars.cpp b/test/src/unit-to_chars.cpp index 8ae05cf952..3e8bbec3c2 100644 --- a/test/src/unit-to_chars.cpp +++ b/test/src/unit-to_chars.cpp @@ -217,12 +217,12 @@ TEST_CASE("digit gen") CAPTURE(digits) CAPTURE(expected_exponent) - char buf[32]; + std::array buf{}; int len = 0; int exponent = 0; - nlohmann::detail::dtoa_impl::grisu2(buf, len, exponent, number); + nlohmann::detail::dtoa_impl::grisu2(buf.data(), len, exponent, number); - CHECK(digits == std::string(buf, buf + len)); + CHECK(digits == std::string(buf.data(), buf.data() + len)); CHECK(expected_exponent == exponent); }; diff --git a/test/src/unit-ubjson.cpp b/test/src/unit-ubjson.cpp index 47fcf24496..0e88371693 100644 --- a/test/src/unit-ubjson.cpp +++ b/test/src/unit-ubjson.cpp @@ -106,7 +106,7 @@ class SaxCountdown return events_left-- > 0; } - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const json::exception& /*unused*/) + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const json::exception& /*unused*/) // NOLINT(readability-convert-member-functions-to-static) { return false; } From 95f50a3416de3c49477a3d95ed07f00d80a51ed2 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 17:17:15 +0100 Subject: [PATCH 068/113] :rotating_light: fix warnings --- test/src/unit-regression2.cpp | 36 
++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/test/src/unit-regression2.cpp b/test/src/unit-regression2.cpp index 210bc6787a..bde66d31c6 100644 --- a/test/src/unit-regression2.cpp +++ b/test/src/unit-regression2.cpp @@ -320,22 +320,24 @@ TEST_CASE("regression tests 2") nlohmann::json dump_test; const std::array data = { - 109, 108, 103, 125, -122, -53, 115, - 18, 3, 0, 102, 19, 1, 15, - -110, 13, -3, -1, -81, 32, 2, - 0, 0, 0, 0, 0, 0, 0, - 8, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, -80, 2, - 0, 0, 96, -118, 46, -116, 46, - 109, -84, -87, 108, 14, 109, -24, - -83, 13, -18, -51, -83, -52, -115, - 14, 6, 32, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, - 64, 3, 0, 0, 0, 35, -74, - -73, 55, 57, -128, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, - 0, 0, 33, 0, 0, 0, -96, - -54, -28, -26 + { + 109, 108, 103, 125, -122, -53, 115, + 18, 3, 0, 102, 19, 1, 15, + -110, 13, -3, -1, -81, 32, 2, + 0, 0, 0, 0, 0, 0, 0, + 8, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, -80, 2, + 0, 0, 96, -118, 46, -116, 46, + 109, -84, -87, 108, 14, 109, -24, + -83, 13, -18, -51, -83, -52, -115, + 14, 6, 32, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + 64, 3, 0, 0, 0, 35, -74, + -73, 55, 57, -128, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + 0, 0, 33, 0, 0, 0, -96, + -54, -28, -26 + } }; std::string s; for (int i : data) @@ -395,7 +397,7 @@ TEST_CASE("regression tests 2") SECTION("string array") { - const std::array input = { 'B', 0x00 }; + const std::array input = {{ 'B', 0x00 }}; json cbor = json::from_cbor(input, true, false); CHECK(cbor.is_discarded()); } From e2868eed3384cf5df6d404872891547889fb3ad7 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 19:42:53 +0100 Subject: [PATCH 069/113] :rotating_light: fix warnings --- .../nlohmann/detail/input/binary_reader.hpp | 2 +- .../nlohmann/detail/input/input_adapters.hpp | 11 +++--- include/nlohmann/detail/input/json_sax.hpp | 8 ++--- include/nlohmann/detail/input/lexer.hpp | 2 +- single_include/nlohmann/json.hpp | 23 ++++++------ test/src/unit-class_lexer.cpp | 4 +-- test/src/unit-deserialization.cpp | 36 +++++++++---------- test/src/unit-regression1.cpp | 20 ++++++----- test/src/unit-regression2.cpp | 4 +-- test/src/unit-to_chars.cpp | 6 ++-- 10 files changed, 61 insertions(+), 55 deletions(-) diff --git a/include/nlohmann/detail/input/binary_reader.hpp b/include/nlohmann/detail/input/binary_reader.hpp index a245e88fdd..3dafb1b81d 100644 --- a/include/nlohmann/detail/input/binary_reader.hpp +++ b/include/nlohmann/detail/input/binary_reader.hpp @@ -71,7 +71,7 @@ class binary_reader @param[in] adapter input adapter to read from */ - explicit binary_reader(InputAdapterType&& adapter) : ia(std::move(adapter)) + explicit binary_reader(InputAdapterType&& adapter) noexcept : ia(std::move(adapter)) { (void)detail::is_sax_static_asserts {}; } diff --git a/include/nlohmann/detail/input/input_adapters.hpp b/include/nlohmann/detail/input/input_adapters.hpp index cb050b1d25..9007d224b8 100644 --- a/include/nlohmann/detail/input/input_adapters.hpp +++ b/include/nlohmann/detail/input/input_adapters.hpp @@ -42,7 +42,7 @@ class file_input_adapter // make class move-only file_input_adapter(const file_input_adapter&) = delete; - file_input_adapter(file_input_adapter&&) = default; + file_input_adapter(file_input_adapter&&) noexcept = default; file_input_adapter& operator=(const file_input_adapter&) = delete; file_input_adapter& operator=(file_input_adapter&&) = delete; @@ -88,9 +88,10 @@ class input_stream_adapter // delete because of pointer members 
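// Aside, not part of the patch: the adapters touched above follow the usual
// move-only shape. Copying is deleted because the class owns raw pointers;
// the move constructor steals the pointers and nulls them in the source, so
// exactly one instance ever releases the resource. A minimal sketch of the
// same idiom, with a hypothetical type name:

#include <cstdio>

class file_handle
{
  public:
    explicit file_handle(std::FILE* f) noexcept : m_file(f) {}

    file_handle(const file_handle&) = delete;             // owns a raw pointer
    file_handle& operator=(const file_handle&) = delete;
    file_handle& operator=(file_handle&&) = delete;       // single transfer path only

    file_handle(file_handle&& rhs) noexcept : m_file(rhs.m_file)
    {
        rhs.m_file = nullptr;                             // source gives up ownership
    }

    ~file_handle()
    {
        if (m_file != nullptr)
        {
            std::fclose(m_file);                          // last owner closes the file
        }
    }

  private:
    std::FILE* m_file;
};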
input_stream_adapter(const input_stream_adapter&) = delete; input_stream_adapter& operator=(input_stream_adapter&) = delete; - input_stream_adapter& operator=(input_stream_adapter&& rhs) = delete; + input_stream_adapter& operator=(input_stream_adapter&&) = delete; - input_stream_adapter(input_stream_adapter&& rhs) noexcept : is(rhs.is), sb(rhs.sb) + input_stream_adapter(input_stream_adapter&& rhs) noexcept + : is(rhs.is), sb(rhs.sb) { rhs.is = nullptr; rhs.sb = nullptr; @@ -125,7 +126,8 @@ class iterator_input_adapter using char_type = typename std::iterator_traits::value_type; iterator_input_adapter(IteratorType first, IteratorType last) - : current(std::move(first)), end(std::move(last)) {} + : current(std::move(first)), end(std::move(last)) + {} typename std::char_traits::int_type get_character() { @@ -150,7 +152,6 @@ class iterator_input_adapter { return current == end; } - }; diff --git a/include/nlohmann/detail/input/json_sax.hpp b/include/nlohmann/detail/input/json_sax.hpp index a7ad2abcac..c41493d6d2 100644 --- a/include/nlohmann/detail/input/json_sax.hpp +++ b/include/nlohmann/detail/input/json_sax.hpp @@ -166,9 +166,9 @@ class json_sax_dom_parser // make class move-only json_sax_dom_parser(const json_sax_dom_parser&) = delete; - json_sax_dom_parser(json_sax_dom_parser&&) noexcept = default; + json_sax_dom_parser(json_sax_dom_parser&&) = default; json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete; - json_sax_dom_parser& operator=(json_sax_dom_parser&&) noexcept = default; + json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; ~json_sax_dom_parser() = default; bool null() @@ -341,9 +341,9 @@ class json_sax_dom_callback_parser // make class move-only json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) noexcept = default; + json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) noexcept = default; + json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; ~json_sax_dom_callback_parser() = default; bool null() diff --git a/include/nlohmann/detail/input/lexer.hpp b/include/nlohmann/detail/input/lexer.hpp index c2141ed263..44b4a5eaac 100644 --- a/include/nlohmann/detail/input/lexer.hpp +++ b/include/nlohmann/detail/input/lexer.hpp @@ -112,7 +112,7 @@ class lexer : public lexer_base public: using token_type = typename lexer_base::token_type; - explicit lexer(InputAdapterType&& adapter, bool ignore_comments_ = false) + explicit lexer(InputAdapterType&& adapter, bool ignore_comments_ = false) noexcept : ia(std::move(adapter)) , ignore_comments(ignore_comments_) , decimal_point_char(static_cast(get_decimal_point())) diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index 8d6bcb1b1e..70d9fca477 100644 --- a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -4851,7 +4851,7 @@ class file_input_adapter // make class move-only file_input_adapter(const file_input_adapter&) = delete; - file_input_adapter(file_input_adapter&&) = default; + file_input_adapter(file_input_adapter&&) noexcept = default; file_input_adapter& operator=(const file_input_adapter&) = delete; file_input_adapter& operator=(file_input_adapter&&) = delete; @@ -4897,9 +4897,10 @@ class input_stream_adapter // delete because of pointer members 
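// Aside, not part of the patch: marking defaulted moves noexcept, as done for
// file_input_adapter above, matters beyond silencing clang-tidy. std::vector
// only moves elements during reallocation when the move constructor is
// noexcept; otherwise it falls back to copying to preserve its strong
// exception guarantee. A small sketch with a hypothetical type:

#include <type_traits>
#include <vector>

struct widget
{
    std::vector<int> data;

    widget() = default;
    widget(const widget&) = default;
    widget& operator=(const widget&) = default;
    widget(widget&&) noexcept = default;             // lets vector move on reallocation
    widget& operator=(widget&&) noexcept = default;
    ~widget() = default;
};

static_assert(std::is_nothrow_move_constructible<widget>::value,
              "vector<widget> will move, not copy, when it grows");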
input_stream_adapter(const input_stream_adapter&) = delete; input_stream_adapter& operator=(input_stream_adapter&) = delete; - input_stream_adapter& operator=(input_stream_adapter&& rhs) = delete; + input_stream_adapter& operator=(input_stream_adapter&&) = delete; - input_stream_adapter(input_stream_adapter&& rhs) noexcept : is(rhs.is), sb(rhs.sb) + input_stream_adapter(input_stream_adapter&& rhs) noexcept + : is(rhs.is), sb(rhs.sb) { rhs.is = nullptr; rhs.sb = nullptr; @@ -4934,7 +4935,8 @@ class iterator_input_adapter using char_type = typename std::iterator_traits::value_type; iterator_input_adapter(IteratorType first, IteratorType last) - : current(std::move(first)), end(std::move(last)) {} + : current(std::move(first)), end(std::move(last)) + {} typename std::char_traits::int_type get_character() { @@ -4959,7 +4961,6 @@ class iterator_input_adapter { return current == end; } - }; @@ -5453,9 +5454,9 @@ class json_sax_dom_parser // make class move-only json_sax_dom_parser(const json_sax_dom_parser&) = delete; - json_sax_dom_parser(json_sax_dom_parser&&) noexcept = default; + json_sax_dom_parser(json_sax_dom_parser&&) = default; json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete; - json_sax_dom_parser& operator=(json_sax_dom_parser&&) noexcept = default; + json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; ~json_sax_dom_parser() = default; bool null() @@ -5628,9 +5629,9 @@ class json_sax_dom_callback_parser // make class move-only json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) noexcept = default; + json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) noexcept = default; + json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; ~json_sax_dom_callback_parser() = default; bool null() @@ -6099,7 +6100,7 @@ class lexer : public lexer_base public: using token_type = typename lexer_base::token_type; - explicit lexer(InputAdapterType&& adapter, bool ignore_comments_ = false) + explicit lexer(InputAdapterType&& adapter, bool ignore_comments_ = false) noexcept : ia(std::move(adapter)) , ignore_comments(ignore_comments_) , decimal_point_char(static_cast(get_decimal_point())) @@ -7817,7 +7818,7 @@ class binary_reader @param[in] adapter input adapter to read from */ - explicit binary_reader(InputAdapterType&& adapter) : ia(std::move(adapter)) + explicit binary_reader(InputAdapterType&& adapter) noexcept : ia(std::move(adapter)) { (void)detail::is_sax_static_asserts {}; } diff --git a/test/src/unit-class_lexer.cpp b/test/src/unit-class_lexer.cpp index ec9ce073ec..d94bdbe79f 100644 --- a/test/src/unit-class_lexer.cpp +++ b/test/src/unit-class_lexer.cpp @@ -40,7 +40,7 @@ json::lexer::token_type scan_string(const char* s, bool ignore_comments = false) json::lexer::token_type scan_string(const char* s, const bool ignore_comments) { auto ia = nlohmann::detail::input_adapter(s); - return nlohmann::detail::lexer(std::move(ia), ignore_comments).scan(); + return nlohmann::detail::lexer(std::move(ia), ignore_comments).scan(); // NOLINT(hicpp-move-const-arg,performance-move-const-arg) } } // namespace @@ -48,7 +48,7 @@ std::string get_error_message(const char* s, bool ignore_comments = false); std::string get_error_message(const char* s, const bool ignore_comments) { auto ia = 
nlohmann::detail::input_adapter(s); - auto lexer = nlohmann::detail::lexer(std::move(ia), ignore_comments); + auto lexer = nlohmann::detail::lexer(std::move(ia), ignore_comments); // NOLINT(hicpp-move-const-arg,performance-move-const-arg) lexer.scan(); return lexer.get_error_message(); } diff --git a/test/src/unit-deserialization.cpp b/test/src/unit-deserialization.cpp index 6b7023a9ee..5ccc0d9e71 100644 --- a/test/src/unit-deserialization.cpp +++ b/test/src/unit-deserialization.cpp @@ -400,7 +400,7 @@ TEST_CASE("deserialization") SECTION("from array") { - uint8_t v[] = {'t', 'r', 'u', 'e'}; + uint8_t v[] = {'t', 'r', 'u', 'e'}; // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) CHECK(json::parse(v) == json(true)); CHECK(json::accept(v)); @@ -496,7 +496,7 @@ TEST_CASE("deserialization") SECTION("from array") { - uint8_t v[] = {'t', 'r', 'u', 'e'}; + uint8_t v[] = {'t', 'r', 'u', 'e'}; // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) CHECK(json::parse(std::begin(v), std::end(v)) == json(true)); CHECK(json::accept(std::begin(v), std::end(v))); @@ -561,7 +561,7 @@ TEST_CASE("deserialization") { SECTION("case 1") { - uint8_t v[] = {'\"', 'a', 'a', 'a', 'a', 'a', 'a', '\\', 'u'}; + std::array v = {{'\"', 'a', 'a', 'a', 'a', 'a', 'a', '\\', 'u'}}; json _; CHECK_THROWS_AS(_ = json::parse(std::begin(v), std::end(v)), json::parse_error&); CHECK(!json::accept(std::begin(v), std::end(v))); @@ -578,7 +578,7 @@ TEST_CASE("deserialization") SECTION("case 2") { - uint8_t v[] = {'\"', 'a', 'a', 'a', 'a', 'a', 'a', '\\', 'u', '1'}; + std::array v = {{'\"', 'a', 'a', 'a', 'a', 'a', 'a', '\\', 'u', '1'}}; json _; CHECK_THROWS_AS(_ = json::parse(std::begin(v), std::end(v)), json::parse_error&); CHECK(!json::accept(std::begin(v), std::end(v))); @@ -595,7 +595,7 @@ TEST_CASE("deserialization") SECTION("case 3") { - uint8_t v[] = {'\"', 'a', 'a', 'a', 'a', 'a', 'a', '\\', 'u', '1', '1', '1', '1', '1', '1', '1', '1'}; + std::array v = {{'\"', 'a', 'a', 'a', 'a', 'a', 'a', '\\', 'u', '1', '1', '1', '1', '1', '1', '1', '1'}}; json _; CHECK_THROWS_AS(_ = json::parse(std::begin(v), std::end(v)), json::parse_error&); CHECK(!json::accept(std::begin(v), std::end(v))); @@ -612,7 +612,7 @@ TEST_CASE("deserialization") SECTION("case 4") { - uint8_t v[] = {'\"', 'a', 'a', 'a', 'a', 'a', 'a', 'u', '1', '1', '1', '1', '1', '1', '1', '1', '\\'}; + std::array v = {{'\"', 'a', 'a', 'a', 'a', 'a', 'a', 'u', '1', '1', '1', '1', '1', '1', '1', '1', '\\'}}; json _; CHECK_THROWS_AS(_ = json::parse(std::begin(v), std::end(v)), json::parse_error&); CHECK(!json::accept(std::begin(v), std::end(v))); @@ -629,7 +629,7 @@ TEST_CASE("deserialization") SECTION("case 5") { - uint8_t v[] = {'\"', 0x7F, 0xC1}; + std::array v = {{'\"', 0x7F, 0xC1}}; json _; CHECK_THROWS_AS(_ = json::parse(std::begin(v), std::end(v)), json::parse_error&); CHECK(!json::accept(std::begin(v), std::end(v))); @@ -646,7 +646,7 @@ TEST_CASE("deserialization") SECTION("case 6") { - uint8_t v[] = {'\"', 0x7F, 0xDF, 0x7F}; + std::array v = {{'\"', 0x7F, 0xDF, 0x7F}}; json _; CHECK_THROWS_AS(_ = json::parse(std::begin(v), std::end(v)), json::parse_error&); CHECK_THROWS_WITH(_ = json::parse(std::begin(v), std::end(v)), @@ -665,7 +665,7 @@ TEST_CASE("deserialization") SECTION("case 7") { - uint8_t v[] = {'\"', 0x7F, 0xDF, 0xC0}; + std::array v = {{'\"', 0x7F, 0xDF, 0xC0}}; json _; CHECK_THROWS_AS(_ = json::parse(std::begin(v), std::end(v)), json::parse_error&); CHECK(!json::accept(std::begin(v), 
std::end(v))); @@ -682,7 +682,7 @@ TEST_CASE("deserialization") SECTION("case 8") { - uint8_t v[] = {'\"', 0x7F, 0xE0, 0x9F}; + std::array v = {{'\"', 0x7F, 0xE0, 0x9F}}; json _; CHECK_THROWS_AS(_ = json::parse(std::begin(v), std::end(v)), json::parse_error&); CHECK(!json::accept(std::begin(v), std::end(v))); @@ -699,7 +699,7 @@ TEST_CASE("deserialization") SECTION("case 9") { - uint8_t v[] = {'\"', 0x7F, 0xEF, 0xC0}; + std::array v = {{'\"', 0x7F, 0xEF, 0xC0}}; json _; CHECK_THROWS_AS(_ = json::parse(std::begin(v), std::end(v)), json::parse_error&); CHECK(!json::accept(std::begin(v), std::end(v))); @@ -716,7 +716,7 @@ TEST_CASE("deserialization") SECTION("case 10") { - uint8_t v[] = {'\"', 0x7F, 0xED, 0x7F}; + std::array v = {{'\"', 0x7F, 0xED, 0x7F}}; json _; CHECK_THROWS_AS(_ = json::parse(std::begin(v), std::end(v)), json::parse_error&); CHECK(!json::accept(std::begin(v), std::end(v))); @@ -733,7 +733,7 @@ TEST_CASE("deserialization") SECTION("case 11") { - uint8_t v[] = {'\"', 0x7F, 0xF0, 0x8F}; + std::array v = {{'\"', 0x7F, 0xF0, 0x8F}}; json _; CHECK_THROWS_AS(_ = json::parse(std::begin(v), std::end(v)), json::parse_error&); CHECK(!json::accept(std::begin(v), std::end(v))); @@ -750,7 +750,7 @@ TEST_CASE("deserialization") SECTION("case 12") { - uint8_t v[] = {'\"', 0x7F, 0xF0, 0xC0}; + std::array v = {{'\"', 0x7F, 0xF0, 0xC0}}; json _; CHECK_THROWS_AS(_ = json::parse(std::begin(v), std::end(v)), json::parse_error&); CHECK(!json::accept(std::begin(v), std::end(v))); @@ -767,7 +767,7 @@ TEST_CASE("deserialization") SECTION("case 13") { - uint8_t v[] = {'\"', 0x7F, 0xF3, 0x7F}; + std::array v = {{'\"', 0x7F, 0xF3, 0x7F}}; json _; CHECK_THROWS_AS(_ = json::parse(std::begin(v), std::end(v)), json::parse_error&); CHECK(!json::accept(std::begin(v), std::end(v))); @@ -784,7 +784,7 @@ TEST_CASE("deserialization") SECTION("case 14") { - uint8_t v[] = {'\"', 0x7F, 0xF3, 0xC0}; + std::array v = {{'\"', 0x7F, 0xF3, 0xC0}}; json _; CHECK_THROWS_AS(_ = json::parse(std::begin(v), std::end(v)), json::parse_error&); CHECK(!json::accept(std::begin(v), std::end(v))); @@ -801,7 +801,7 @@ TEST_CASE("deserialization") SECTION("case 15") { - uint8_t v[] = {'\"', 0x7F, 0xF4, 0x7F}; + std::array v = {{'\"', 0x7F, 0xF4, 0x7F}}; json _; CHECK_THROWS_AS(_ = json::parse(std::begin(v), std::end(v)), json::parse_error&); CHECK(!json::accept(std::begin(v), std::end(v))); @@ -818,7 +818,7 @@ TEST_CASE("deserialization") SECTION("case 16") { - uint8_t v[] = {'{', '\"', '\"', ':', '1', '1'}; + std::array v = {{'{', '\"', '\"', ':', '1', '1'}}; json _; CHECK_THROWS_AS(_ = json::parse(std::begin(v), std::end(v)), json::parse_error&); CHECK(!json::accept(std::begin(v), std::end(v))); diff --git a/test/src/unit-regression1.cpp b/test/src/unit-regression1.cpp index e7a5c910a0..3f0daf48ea 100644 --- a/test/src/unit-regression1.cpp +++ b/test/src/unit-regression1.cpp @@ -119,6 +119,10 @@ struct nocopy { nocopy() = default; nocopy(const nocopy&) = delete; + nocopy(nocopy&&) = delete; + nocopy& operator=(const nocopy&) = delete; + nocopy& operator=(nocopy&&) = delete; + ~nocopy() = default; int val = 0; @@ -408,18 +412,18 @@ TEST_CASE("regression tests 1") json j; // Non-const access with key as "char []" - char array_key[] = "Key1"; + char array_key[] = "Key1"; // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) CHECK_NOTHROW(j[array_key] = 1); CHECK(j[array_key] == json(1)); // Non-const access with key as "const char[]" - const char const_array_key[] = "Key2"; + const char 
const_array_key[] = "Key2"; // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) CHECK_NOTHROW(j[const_array_key] = 2); CHECK(j[const_array_key] == json(2)); // Non-const access with key as "char *" - char _ptr_key[] = "Key3"; - char* ptr_key = &_ptr_key[0]; + char _ptr_key[] = "Key3"; // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) + char* ptr_key = &_ptr_key[0]; // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) CHECK_NOTHROW(j[ptr_key] = 3); CHECK(j[ptr_key] == json(3)); @@ -1099,10 +1103,10 @@ TEST_CASE("regression tests 1") SECTION("issue #414 - compare with literal 0)") { #define CHECK_TYPE(v) \ - CHECK((json(v) == v));\ - CHECK((v == json(v)));\ - CHECK_FALSE((json(v) != v));\ - CHECK_FALSE((v != json(v))); + CHECK((json(v) == (v)));\ + CHECK(((v) == json(v)));\ + CHECK_FALSE((json(v) != (v)));\ + CHECK_FALSE(((v) != json(v))); CHECK_TYPE(nullptr) CHECK_TYPE(0) diff --git a/test/src/unit-regression2.cpp b/test/src/unit-regression2.cpp index bde66d31c6..cd1e749092 100644 --- a/test/src/unit-regression2.cpp +++ b/test/src/unit-regression2.cpp @@ -432,8 +432,8 @@ TEST_CASE("regression tests 2") SECTION("issue #2067 - cannot serialize binary data to text JSON") { - const unsigned char data[] = {0x81, 0xA4, 0x64, 0x61, 0x74, 0x61, 0xC4, 0x0F, 0x33, 0x30, 0x30, 0x32, 0x33, 0x34, 0x30, 0x31, 0x30, 0x37, 0x30, 0x35, 0x30, 0x31, 0x30}; - json j = json::from_msgpack(data, sizeof(data) / sizeof(data[0])); + const std::array data = {{0x81, 0xA4, 0x64, 0x61, 0x74, 0x61, 0xC4, 0x0F, 0x33, 0x30, 0x30, 0x32, 0x33, 0x34, 0x30, 0x31, 0x30, 0x37, 0x30, 0x35, 0x30, 0x31, 0x30}}; + json j = json::from_msgpack(data.data(), data.size()); CHECK_NOTHROW( j.dump(4, // Indent ' ', // Indent char diff --git a/test/src/unit-to_chars.cpp b/test/src/unit-to_chars.cpp index 3e8bbec3c2..1a4574e0eb 100644 --- a/test/src/unit-to_chars.cpp +++ b/test/src/unit-to_chars.cpp @@ -153,12 +153,12 @@ TEST_CASE("digit gen") CAPTURE(digits) CAPTURE(expected_exponent) - char buf[32]; + std::array buf{}; int len = 0; int exponent = 0; - nlohmann::detail::dtoa_impl::grisu2(buf, len, exponent, number); + nlohmann::detail::dtoa_impl::grisu2(buf.data(), len, exponent, number); - CHECK(digits == std::string(buf, buf + len)); + CHECK(digits == std::string(buf.data(), buf.data() + len)); CHECK(expected_exponent == exponent); }; From b5c5eaad5a634132ffc8dfbffe0a9ecfcbcff87c Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 20:00:59 +0100 Subject: [PATCH 070/113] :rotating_light: fix warnings --- .clang-tidy | 5 +++-- include/nlohmann/detail/input/binary_reader.hpp | 6 +++--- include/nlohmann/detail/input/input_adapters.hpp | 3 ++- include/nlohmann/detail/input/lexer.hpp | 2 +- single_include/nlohmann/json.hpp | 11 ++++++----- test/src/unit-regression1.cpp | 1 - 6 files changed, 15 insertions(+), 13 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index cdbba1fca1..11009409e8 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -5,6 +5,7 @@ Checks: '*, -cppcoreguidelines-avoid-non-const-global-variables, -cppcoreguidelines-macro-usage, -cppcoreguidelines-pro-bounds-array-to-pointer-decay, + -cppcoreguidelines-pro-bounds-constant-array-index, -cppcoreguidelines-pro-bounds-pointer-arithmetic, -cppcoreguidelines-pro-type-reinterpret-cast, -cppcoreguidelines-pro-type-union-access, @@ -23,14 +24,14 @@ Checks: '*, -hicpp-uppercase-literal-suffix, -llvm-header-guard, -llvm-include-order, + -llvmlibc-*, 
-misc-no-recursion, -misc-non-private-member-variables-in-classes, -modernize-use-trailing-return-type, -readability-function-size, -readability-magic-numbers, -readability-redundant-access-specifiers, - -readability-uppercase-literal-suffix, - -llvmlibc-*' + -readability-uppercase-literal-suffix' CheckOptions: - key: hicpp-special-member-functions.AllowSoleDefaultDtor diff --git a/include/nlohmann/detail/input/binary_reader.hpp b/include/nlohmann/detail/input/binary_reader.hpp index 3dafb1b81d..c873eefa83 100644 --- a/include/nlohmann/detail/input/binary_reader.hpp +++ b/include/nlohmann/detail/input/binary_reader.hpp @@ -2211,8 +2211,8 @@ class binary_reader } // parse number string - auto number_ia = detail::input_adapter(std::forward(number_vector)); - auto number_lexer = detail::lexer(std::move(number_ia), false); + using ia_type = decltype(detail::input_adapter(std::forward(number_vector))); + auto number_lexer = detail::lexer(detail::input_adapter(std::forward(number_vector)), false); const auto result_number = number_lexer.scan(); const auto number_string = number_lexer.get_token_string(); const auto result_remainder = number_lexer.scan(); @@ -2400,7 +2400,7 @@ class binary_reader std::string get_token_string() const { std::array cr{{}}; - (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast(current)); + (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast(current)); // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-vararg) return std::string{cr.data()}; } diff --git a/include/nlohmann/detail/input/input_adapters.hpp b/include/nlohmann/detail/input/input_adapters.hpp index 9007d224b8..3edca57c89 100644 --- a/include/nlohmann/detail/input/input_adapters.hpp +++ b/include/nlohmann/detail/input/input_adapters.hpp @@ -45,6 +45,7 @@ class file_input_adapter file_input_adapter(file_input_adapter&&) noexcept = default; file_input_adapter& operator=(const file_input_adapter&) = delete; file_input_adapter& operator=(file_input_adapter&&) = delete; + ~file_input_adapter() = default; std::char_traits::int_type get_character() noexcept { @@ -465,7 +466,7 @@ class span_input_adapter contiguous_bytes_input_adapter&& get() { - return std::move(ia); + return std::move(ia); // NOLINT(hicpp-move-const-arg,performance-move-const-arg) } private: diff --git a/include/nlohmann/detail/input/lexer.hpp b/include/nlohmann/detail/input/lexer.hpp index 44b4a5eaac..4318e0bb79 100644 --- a/include/nlohmann/detail/input/lexer.hpp +++ b/include/nlohmann/detail/input/lexer.hpp @@ -1447,7 +1447,7 @@ class lexer : public lexer_base { // escape control characters std::array cs{{}}; - (std::snprintf)(cs.data(), cs.size(), "", static_cast(c)); + (std::snprintf)(cs.data(), cs.size(), "", static_cast(c)); // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-varar) result += cs.data(); } else diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index 70d9fca477..3ea9ba28c1 100644 --- a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -4854,6 +4854,7 @@ class file_input_adapter file_input_adapter(file_input_adapter&&) noexcept = default; file_input_adapter& operator=(const file_input_adapter&) = delete; file_input_adapter& operator=(file_input_adapter&&) = delete; + ~file_input_adapter() = default; std::char_traits::int_type get_character() noexcept { @@ -5274,7 +5275,7 @@ class span_input_adapter contiguous_bytes_input_adapter&& get() { - return std::move(ia); + return std::move(ia); // NOLINT(hicpp-move-const-arg,performance-move-const-arg) } private: @@ 
-7435,7 +7436,7 @@ class lexer : public lexer_base { // escape control characters std::array cs{{}}; - (std::snprintf)(cs.data(), cs.size(), "", static_cast(c)); + (std::snprintf)(cs.data(), cs.size(), "", static_cast(c)); // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-varar) result += cs.data(); } else @@ -9958,8 +9959,8 @@ class binary_reader } // parse number string - auto number_ia = detail::input_adapter(std::forward(number_vector)); - auto number_lexer = detail::lexer(std::move(number_ia), false); + using ia_type = decltype(detail::input_adapter(std::forward(number_vector))); + auto number_lexer = detail::lexer(detail::input_adapter(std::forward(number_vector)), false); const auto result_number = number_lexer.scan(); const auto number_string = number_lexer.get_token_string(); const auto result_remainder = number_lexer.scan(); @@ -10147,7 +10148,7 @@ class binary_reader std::string get_token_string() const { std::array cr{{}}; - (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast(current)); + (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast(current)); // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-vararg) return std::string{cr.data()}; } diff --git a/test/src/unit-regression1.cpp b/test/src/unit-regression1.cpp index 3f0daf48ea..c8a20a9cd7 100644 --- a/test/src/unit-regression1.cpp +++ b/test/src/unit-regression1.cpp @@ -122,7 +122,6 @@ struct nocopy nocopy(nocopy&&) = delete; nocopy& operator=(const nocopy&) = delete; nocopy& operator=(nocopy&&) = delete; - ~nocopy() = default; int val = 0; From de9ae4e7598f6f4fa3223ef084c4cb969d8a03b7 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 20:39:50 +0100 Subject: [PATCH 071/113] :rotating_light: fix warnings --- .../nlohmann/detail/input/binary_reader.hpp | 4 +-- .../nlohmann/detail/iterators/iter_impl.hpp | 4 +-- .../iterators/json_reverse_iterator.hpp | 4 +-- .../detail/iterators/primitive_iterator.hpp | 4 +-- include/nlohmann/detail/output/serializer.hpp | 9 +++++-- single_include/nlohmann/json.hpp | 25 +++++++++++-------- 6 files changed, 30 insertions(+), 20 deletions(-) diff --git a/include/nlohmann/detail/input/binary_reader.hpp b/include/nlohmann/detail/input/binary_reader.hpp index c873eefa83..e158420e40 100644 --- a/include/nlohmann/detail/input/binary_reader.hpp +++ b/include/nlohmann/detail/input/binary_reader.hpp @@ -2211,8 +2211,8 @@ class binary_reader } // parse number string - using ia_type = decltype(detail::input_adapter(std::forward(number_vector))); - auto number_lexer = detail::lexer(detail::input_adapter(std::forward(number_vector)), false); + using ia_type = decltype(detail::input_adapter(number_vector)); + auto number_lexer = detail::lexer(detail::input_adapter(number_vector), false); const auto result_number = number_lexer.scan(); const auto number_string = number_lexer.get_token_string(); const auto result_remainder = number_lexer.scan(); diff --git a/include/nlohmann/detail/iterators/iter_impl.hpp b/include/nlohmann/detail/iterators/iter_impl.hpp index 67134166e5..7e9a985122 100644 --- a/include/nlohmann/detail/iterators/iter_impl.hpp +++ b/include/nlohmann/detail/iterators/iter_impl.hpp @@ -309,7 +309,7 @@ class iter_impl @brief post-increment (it++) @pre The iterator is initialized; i.e. `m_object != nullptr`. */ - iter_impl const operator++(int) + iter_impl const operator++(int) // NOLINT(readability-const-return-type) { auto result = *this; ++(*this); @@ -352,7 +352,7 @@ class iter_impl @brief post-decrement (it--) @pre The iterator is initialized; i.e. 
`m_object != nullptr`. */ - iter_impl const operator--(int) + iter_impl const operator--(int) // NOLINT(readability-const-return-type) { auto result = *this; --(*this); diff --git a/include/nlohmann/detail/iterators/json_reverse_iterator.hpp b/include/nlohmann/detail/iterators/json_reverse_iterator.hpp index f3b5b5db6b..e787fdbcd7 100644 --- a/include/nlohmann/detail/iterators/json_reverse_iterator.hpp +++ b/include/nlohmann/detail/iterators/json_reverse_iterator.hpp @@ -48,7 +48,7 @@ class json_reverse_iterator : public std::reverse_iterator explicit json_reverse_iterator(const base_iterator& it) noexcept : base_iterator(it) {} /// post-increment (it++) - json_reverse_iterator const operator++(int) + json_reverse_iterator const operator++(int) // NOLINT(readability-const-return-type) { return static_cast(base_iterator::operator++(1)); } @@ -60,7 +60,7 @@ class json_reverse_iterator : public std::reverse_iterator } /// post-decrement (it--) - json_reverse_iterator const operator--(int) + json_reverse_iterator const operator--(int) // NOLINT(readability-const-return-type) { return static_cast(base_iterator::operator--(1)); } diff --git a/include/nlohmann/detail/iterators/primitive_iterator.hpp b/include/nlohmann/detail/iterators/primitive_iterator.hpp index ae7471ef59..15aa2f08aa 100644 --- a/include/nlohmann/detail/iterators/primitive_iterator.hpp +++ b/include/nlohmann/detail/iterators/primitive_iterator.hpp @@ -87,7 +87,7 @@ class primitive_iterator_t return *this; } - primitive_iterator_t const operator++(int) noexcept + primitive_iterator_t const operator++(int) noexcept // NOLINT(readability-const-return-type) { auto result = *this; ++m_it; @@ -100,7 +100,7 @@ class primitive_iterator_t return *this; } - primitive_iterator_t const operator--(int) noexcept + primitive_iterator_t const operator--(int) noexcept // NOLINT(readability-const-return-type) { auto result = *this; --m_it; diff --git a/include/nlohmann/detail/output/serializer.hpp b/include/nlohmann/detail/output/serializer.hpp index 54250e38e9..3f27ffb7ad 100644 --- a/include/nlohmann/detail/output/serializer.hpp +++ b/include/nlohmann/detail/output/serializer.hpp @@ -454,12 +454,14 @@ class serializer { if (codepoint <= 0xFFFF) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg,hicpp-vararg) (std::snprintf)(string_buffer.data() + bytes, 7, "\\u%04x", static_cast(codepoint)); bytes += 6; } else { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg,hicpp-vararg) (std::snprintf)(string_buffer.data() + bytes, 13, "\\u%04x\\u%04x", static_cast(0xD7C0u + (codepoint >> 10u)), static_cast(0xDC00u + (codepoint & 0x3FFu))); @@ -498,6 +500,7 @@ class serializer case error_handler_t::strict: { std::string sn(3, '\0'); + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg,hicpp-vararg) (std::snprintf)(&sn[0], sn.size(), "%.2X", byte); JSON_THROW(type_error::create(316, "invalid UTF-8 byte at index " + std::to_string(i) + ": 0x" + sn)); } @@ -592,6 +595,7 @@ class serializer case error_handler_t::strict: { std::string sn(3, '\0'); + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg,hicpp-vararg) (std::snprintf)(&sn[0], sn.size(), "%.2X", static_cast(s.back())); JSON_THROW(type_error::create(316, "incomplete UTF-8 string; last byte: 0x" + sn)); } @@ -783,8 +787,8 @@ class serializer void dump_float(number_float_t x, std::true_type /*is_ieee_single_or_double*/) { - char* begin = number_buffer.data(); - char* end = ::nlohmann::detail::to_chars(begin, begin + number_buffer.size(), x); + auto* begin = number_buffer.data(); + auto* end = 
::nlohmann::detail::to_chars(begin, begin + number_buffer.size(), x); o->write_characters(begin, static_cast(end - begin)); } @@ -795,6 +799,7 @@ class serializer static constexpr auto d = std::numeric_limits::max_digits10; // the actual conversion + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg,hicpp-vararg) std::ptrdiff_t len = (std::snprintf)(number_buffer.data(), number_buffer.size(), "%.*g", d, x); // negative value indicates an error diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index 3ea9ba28c1..73f0933c59 100644 --- a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -9959,8 +9959,8 @@ class binary_reader } // parse number string - using ia_type = decltype(detail::input_adapter(std::forward(number_vector))); - auto number_lexer = detail::lexer(detail::input_adapter(std::forward(number_vector)), false); + using ia_type = decltype(detail::input_adapter(number_vector)); + auto number_lexer = detail::lexer(detail::input_adapter(number_vector), false); const auto result_number = number_lexer.scan(); const auto number_string = number_lexer.get_token_string(); const auto result_remainder = number_lexer.scan(); @@ -10817,7 +10817,7 @@ class primitive_iterator_t return *this; } - primitive_iterator_t const operator++(int) noexcept + primitive_iterator_t const operator++(int) noexcept // NOLINT(readability-const-return-type) { auto result = *this; ++m_it; @@ -10830,7 +10830,7 @@ class primitive_iterator_t return *this; } - primitive_iterator_t const operator--(int) noexcept + primitive_iterator_t const operator--(int) noexcept // NOLINT(readability-const-return-type) { auto result = *this; --m_it; @@ -11194,7 +11194,7 @@ class iter_impl @brief post-increment (it++) @pre The iterator is initialized; i.e. `m_object != nullptr`. */ - iter_impl const operator++(int) + iter_impl const operator++(int) // NOLINT(readability-const-return-type) { auto result = *this; ++(*this); @@ -11237,7 +11237,7 @@ class iter_impl @brief post-decrement (it--) @pre The iterator is initialized; i.e. `m_object != nullptr`. 
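// Aside, not part of the patch: the const in these postfix signatures is
// deliberate. Returning a const value makes nonsense like (it++)++ fail to
// compile, because a non-const operator++ cannot be called on a const
// temporary. clang-tidy's readability-const-return-type flags const-by-value
// returns in general, which is why the patch adds a targeted NOLINT instead
// of changing the signatures. The shape in isolation, with a hypothetical type:

struct counter
{
    int v = 0;

    counter& operator++()            // prefix: mutate and return *this
    {
        ++v;
        return *this;
    }

    counter const operator++(int)    // postfix: hand back the old value
    {
        counter result = *this;      // copy the current state
        ++(*this);                   // delegate to the prefix form
        return result;               // const return rejects (x++)++
    }
};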
*/ - iter_impl const operator--(int) + iter_impl const operator--(int) // NOLINT(readability-const-return-type) { auto result = *this; --(*this); @@ -11578,7 +11578,7 @@ class json_reverse_iterator : public std::reverse_iterator explicit json_reverse_iterator(const base_iterator& it) noexcept : base_iterator(it) {} /// post-increment (it++) - json_reverse_iterator const operator++(int) + json_reverse_iterator const operator++(int) // NOLINT(readability-const-return-type) { return static_cast(base_iterator::operator++(1)); } @@ -11590,7 +11590,7 @@ class json_reverse_iterator : public std::reverse_iterator } /// post-decrement (it--) - json_reverse_iterator const operator--(int) + json_reverse_iterator const operator--(int) // NOLINT(readability-const-return-type) { return static_cast(base_iterator::operator--(1)); } @@ -16004,12 +16004,14 @@ class serializer { if (codepoint <= 0xFFFF) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg,hicpp-vararg) (std::snprintf)(string_buffer.data() + bytes, 7, "\\u%04x", static_cast(codepoint)); bytes += 6; } else { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg,hicpp-vararg) (std::snprintf)(string_buffer.data() + bytes, 13, "\\u%04x\\u%04x", static_cast(0xD7C0u + (codepoint >> 10u)), static_cast(0xDC00u + (codepoint & 0x3FFu))); @@ -16048,6 +16050,7 @@ class serializer case error_handler_t::strict: { std::string sn(3, '\0'); + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg,hicpp-vararg) (std::snprintf)(&sn[0], sn.size(), "%.2X", byte); JSON_THROW(type_error::create(316, "invalid UTF-8 byte at index " + std::to_string(i) + ": 0x" + sn)); } @@ -16142,6 +16145,7 @@ class serializer case error_handler_t::strict: { std::string sn(3, '\0'); + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg,hicpp-vararg) (std::snprintf)(&sn[0], sn.size(), "%.2X", static_cast(s.back())); JSON_THROW(type_error::create(316, "incomplete UTF-8 string; last byte: 0x" + sn)); } @@ -16333,8 +16337,8 @@ class serializer void dump_float(number_float_t x, std::true_type /*is_ieee_single_or_double*/) { - char* begin = number_buffer.data(); - char* end = ::nlohmann::detail::to_chars(begin, begin + number_buffer.size(), x); + auto* begin = number_buffer.data(); + auto* end = ::nlohmann::detail::to_chars(begin, begin + number_buffer.size(), x); o->write_characters(begin, static_cast(end - begin)); } @@ -16345,6 +16349,7 @@ class serializer static constexpr auto d = std::numeric_limits::max_digits10; // the actual conversion + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg,hicpp-vararg) std::ptrdiff_t len = (std::snprintf)(number_buffer.data(), number_buffer.size(), "%.*g", d, x); // negative value indicates an error From c3edf49451e1bac100ad83fa97e61bc24a04211a Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 21:25:46 +0100 Subject: [PATCH 072/113] :rotating_light: fix warnings --- cmake/ci.cmake | 9 ++++- include/nlohmann/detail/hash.hpp | 2 +- .../nlohmann/detail/input/binary_reader.hpp | 8 ++--- include/nlohmann/detail/input/lexer.hpp | 2 +- include/nlohmann/detail/input/parser.hpp | 2 +- .../nlohmann/detail/output/binary_writer.hpp | 4 +-- include/nlohmann/detail/output/serializer.hpp | 12 +++---- include/nlohmann/json.hpp | 4 +-- single_include/nlohmann/json.hpp | 34 +++++++++---------- 9 files changed, 42 insertions(+), 35 deletions(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 433f4f1f72..01cd7c75de 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -484,7 +484,14 @@ add_custom_target(ci_oclint -DCMAKE_EXPORT_COMPILE_COMMANDS=ON 
-DJSON_BuildTests=OFF -DJSON_CI=ON -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_oclint - COMMAND ${OCLINT_TOOL} -i ${PROJECT_BINARY_DIR}/build_oclint/src_single/all.cpp -p ${PROJECT_BINARY_DIR}/build_oclint -- -report-type html -enable-global-analysis -o oclint_report.html + COMMAND ${OCLINT_TOOL} -i ${PROJECT_BINARY_DIR}/build_oclint/src_single/all.cpp -p ${PROJECT_BINARY_DIR}/build_oclint -- + -report-type html -enable-global-analysis --max-priority-1=0 --max-priority-2=1000 --max-priority-3=2000 + --disable-rule=MultipleUnaryOperator + --disable-rule=DoubleNegative + --disable-rule=ShortVariableName + --disable-rule=GotoStatement + --disable-rule=LongLine + -o ${PROJECT_BINARY_DIR}/build_oclint/oclint_report.html COMMENT "Check code with OCLint" ) diff --git a/include/nlohmann/detail/hash.hpp b/include/nlohmann/detail/hash.hpp index 12706a7fdb..70c5daf338 100644 --- a/include/nlohmann/detail/hash.hpp +++ b/include/nlohmann/detail/hash.hpp @@ -112,7 +112,7 @@ std::size_t hash(const BasicJsonType& j) } default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE return 0; // LCOV_EXCL_LINE } } diff --git a/include/nlohmann/detail/input/binary_reader.hpp b/include/nlohmann/detail/input/binary_reader.hpp index e158420e40..265c0bf795 100644 --- a/include/nlohmann/detail/input/binary_reader.hpp +++ b/include/nlohmann/detail/input/binary_reader.hpp @@ -119,7 +119,7 @@ class binary_reader break; default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } // strict mode: next byte must be EOF @@ -754,7 +754,7 @@ class binary_reader } default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE return false; // LCOV_EXCL_LINE } } @@ -2287,7 +2287,7 @@ class binary_reader bool get_number(const input_format_t format, NumberType& result) { // step 1: read input into array with system's byte order - std::array vec; + std::array vec{}; for (std::size_t i = 0; i < sizeof(NumberType); ++i) { get(); @@ -2435,7 +2435,7 @@ class binary_reader break; default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } return error_msg + " " + context + ": " + detail; diff --git a/include/nlohmann/detail/input/lexer.hpp b/include/nlohmann/detail/input/lexer.hpp index 4318e0bb79..7c77af37ed 100644 --- a/include/nlohmann/detail/input/lexer.hpp +++ b/include/nlohmann/detail/input/lexer.hpp @@ -998,7 +998,7 @@ class lexer : public lexer_base // all other characters are rejected outside scan_number() default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } scan_number_minus: diff --git a/include/nlohmann/detail/input/parser.hpp b/include/nlohmann/detail/input/parser.hpp index 5f16b9af8c..532d04ab52 100644 --- a/include/nlohmann/detail/input/parser.hpp +++ b/include/nlohmann/detail/input/parser.hpp @@ -41,7 +41,7 @@ enum class parse_event_t : uint8_t template using parser_callback_t = - std::function; + std::function; /*! 
@brief syntax analysis diff --git a/include/nlohmann/detail/output/binary_writer.hpp b/include/nlohmann/detail/output/binary_writer.hpp index 3fe102d442..2ff5c377e0 100644 --- a/include/nlohmann/detail/output/binary_writer.hpp +++ b/include/nlohmann/detail/output/binary_writer.hpp @@ -1140,7 +1140,7 @@ class binary_writer // LCOV_EXCL_START default: - JSON_ASSERT(false); + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) return 0ul; // LCOV_EXCL_STOP } @@ -1186,7 +1186,7 @@ class binary_writer // LCOV_EXCL_START default: - JSON_ASSERT(false); + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) return; // LCOV_EXCL_STOP } diff --git a/include/nlohmann/detail/output/serializer.hpp b/include/nlohmann/detail/output/serializer.hpp index 3f27ffb7ad..91842ed7b9 100644 --- a/include/nlohmann/detail/output/serializer.hpp +++ b/include/nlohmann/detail/output/serializer.hpp @@ -358,7 +358,7 @@ class serializer } default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } } @@ -560,7 +560,7 @@ class serializer } default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } break; } @@ -624,7 +624,7 @@ class serializer } default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } } } @@ -704,7 +704,7 @@ class serializer } // use a pointer to fill the buffer - auto buffer_ptr = number_buffer.begin(); + auto buffer_ptr = number_buffer.begin(); // NOLINT(llvm-qualified-auto,readability-qualified-auto,cppcoreguidelines-pro-type-vararg,hicpp-vararg) const bool is_negative = std::is_same::value && !(x >= 0); // see issue #755 number_unsigned_t abs_value; @@ -906,7 +906,7 @@ class serializer */ number_unsigned_t remove_sign(number_unsigned_t x) { - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE return x; // LCOV_EXCL_LINE } @@ -921,7 +921,7 @@ class serializer */ inline number_unsigned_t remove_sign(number_integer_t x) noexcept { - JSON_ASSERT(x < 0 && x < (std::numeric_limits::max)()); + JSON_ASSERT(x < 0 && x < (std::numeric_limits::max)()); // NOLINT(misc-redundant-expression) return static_cast(-(x + 1)) + 1; } diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp index 2eb288eccc..8b63a3fecc 100644 --- a/include/nlohmann/json.hpp +++ b/include/nlohmann/json.hpp @@ -1525,7 +1525,7 @@ class basic_json m_type = value_t::discarded; break; default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } assert_invariant(); } @@ -8273,7 +8273,7 @@ class basic_json // if there exists a parent it cannot be primitive default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } }; diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index 73f0933c59..49e6d017a1 100644 --- a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -4778,7 +4778,7 @@ std::size_t hash(const BasicJsonType& j) } default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + 
JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE return 0; // LCOV_EXCL_LINE } } @@ -6987,7 +6987,7 @@ class lexer : public lexer_base // all other characters are rejected outside scan_number() default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } scan_number_minus: @@ -7867,7 +7867,7 @@ class binary_reader break; default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } // strict mode: next byte must be EOF @@ -8502,7 +8502,7 @@ class binary_reader } default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE return false; // LCOV_EXCL_LINE } } @@ -10035,7 +10035,7 @@ class binary_reader bool get_number(const input_format_t format, NumberType& result) { // step 1: read input into array with system's byte order - std::array vec; + std::array vec{}; for (std::size_t i = 0; i < sizeof(NumberType); ++i) { get(); @@ -10183,7 +10183,7 @@ class binary_reader break; default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } return error_msg + " " + context + ": " + detail; @@ -10263,7 +10263,7 @@ enum class parse_event_t : uint8_t template using parser_callback_t = - std::function; + std::function; /*! @brief syntax analysis @@ -13981,7 +13981,7 @@ class binary_writer // LCOV_EXCL_START default: - JSON_ASSERT(false); + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) return 0ul; // LCOV_EXCL_STOP } @@ -14027,7 +14027,7 @@ class binary_writer // LCOV_EXCL_START default: - JSON_ASSERT(false); + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) return; // LCOV_EXCL_STOP } @@ -15908,7 +15908,7 @@ class serializer } default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } } @@ -16110,7 +16110,7 @@ class serializer } default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } break; } @@ -16174,7 +16174,7 @@ class serializer } default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } } } @@ -16254,7 +16254,7 @@ class serializer } // use a pointer to fill the buffer - auto buffer_ptr = number_buffer.begin(); + auto buffer_ptr = number_buffer.begin(); // NOLINT(llvm-qualified-auto,readability-qualified-auto,cppcoreguidelines-pro-type-vararg,hicpp-vararg) const bool is_negative = std::is_same::value && !(x >= 0); // see issue #755 number_unsigned_t abs_value; @@ -16456,7 +16456,7 @@ class serializer */ number_unsigned_t remove_sign(number_unsigned_t x) { - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE return x; // LCOV_EXCL_LINE } @@ -16471,7 +16471,7 @@ class serializer */ inline number_unsigned_t remove_sign(number_integer_t x) noexcept { - JSON_ASSERT(x < 0 && x < (std::numeric_limits::max)()); + JSON_ASSERT(x < 0 && x < 
(std::numeric_limits::max)()); // NOLINT(misc-redundant-expression) return static_cast(-(x + 1)) + 1; } @@ -18153,7 +18153,7 @@ class basic_json m_type = value_t::discarded; break; default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } assert_invariant(); } @@ -24901,7 +24901,7 @@ class basic_json // if there exists a parent it cannot be primitive default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } }; From 8b362b52cb4ede98739b5f64e39047e590c229ab Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 21:38:19 +0100 Subject: [PATCH 073/113] :construction_worker: add CI steps for Sanitizers --- .github/workflows/ubuntu.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index e11d5cd646..d2dedeef9f 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -107,3 +107,13 @@ jobs: run: cmake -S . -B build -DJSON_CI=On - name: build run: cmake --build build --target ci_clang_tidy || true + + ci_test_clang_sanitizer: + runs-on: ubuntu-latest + container: nlohmann/json-ci:latest + steps: + - uses: actions/checkout@v2 + - name: cmake + run: cmake -S . -B build -DJSON_CI=On + - name: build + run: cmake --build build --target ci_test_clang_sanitizer From 3393ce0478181f70bf14de8eaee8693fee3c5214 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 21:53:18 +0100 Subject: [PATCH 074/113] :rotating_light: fix warnings --- include/nlohmann/detail/input/lexer.hpp | 2 +- include/nlohmann/detail/output/binary_writer.hpp | 2 +- include/nlohmann/json.hpp | 10 +++++----- single_include/nlohmann/json.hpp | 2 +- test/src/unit-cbor.cpp | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/include/nlohmann/detail/input/lexer.hpp b/include/nlohmann/detail/input/lexer.hpp index 7c77af37ed..8a8403a068 100644 --- a/include/nlohmann/detail/input/lexer.hpp +++ b/include/nlohmann/detail/input/lexer.hpp @@ -1447,7 +1447,7 @@ class lexer : public lexer_base { // escape control characters std::array cs{{}}; - (std::snprintf)(cs.data(), cs.size(), "", static_cast(c)); // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-varar) + (std::snprintf)(cs.data(), cs.size(), "", static_cast(c)); // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-vararg) result += cs.data(); } else diff --git a/include/nlohmann/detail/output/binary_writer.hpp b/include/nlohmann/detail/output/binary_writer.hpp index 2ff5c377e0..c784205913 100644 --- a/include/nlohmann/detail/output/binary_writer.hpp +++ b/include/nlohmann/detail/output/binary_writer.hpp @@ -1509,7 +1509,7 @@ class binary_writer void write_number(const NumberType n) { // step 1: write number to array of length NumberType - std::array vec; + std::array vec{}; std::memcpy(vec.data(), &n, sizeof(NumberType)); // step 2: write array to output (with possible reordering) diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp index 8b63a3fecc..57cf9d5784 100644 --- a/include/nlohmann/json.hpp +++ b/include/nlohmann/json.hpp @@ -5936,7 +5936,7 @@ class basic_json @since version 1.0.0 */ - void swap(array_t& other) + void swap(array_t& other) // NOLINT(bugprone-exception-escape) { // swap only works for arrays if (JSON_HEDLEY_LIKELY(is_array())) @@ -5969,7 +5969,7 @@ class basic_json @since version 1.0.0 */ - void 
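// A minimal sketch of the write_number()/get_number() changes above, with
// illustrative template arguments (the buffer is an array of CharType with
// one element per byte of NumberType): value-initializing it with {} keeps
// sanitizers and analyzers from seeing a read of indeterminate bytes between
// the declaration and the memcpy that fills it.
#include <array>
#include <cstring>

template <typename CharType, typename NumberType>
std::array<CharType, sizeof(NumberType)> to_bytes(const NumberType n)
{
    std::array<CharType, sizeof(NumberType)> vec{};  // zero-initialized, never read uninitialized
    std::memcpy(vec.data(), &n, sizeof(NumberType));
    return vec;
}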
swap(object_t& other) + void swap(object_t& other) // NOLINT(bugprone-exception-escape) { // swap only works for objects if (JSON_HEDLEY_LIKELY(is_object())) @@ -6002,7 +6002,7 @@ class basic_json @since version 1.0.0 */ - void swap(string_t& other) + void swap(string_t& other) // NOLINT(bugprone-exception-escape) { // swap only works for strings if (JSON_HEDLEY_LIKELY(is_string())) @@ -6035,7 +6035,7 @@ class basic_json @since version 3.8.0 */ - void swap(binary_t& other) + void swap(binary_t& other) // NOLINT(bugprone-exception-escape) { // swap only works for strings if (JSON_HEDLEY_LIKELY(is_binary())) @@ -6049,7 +6049,7 @@ class basic_json } /// @copydoc swap(binary_t&) - void swap(typename binary_t::container_type& other) + void swap(typename binary_t::container_type& other) // NOLINT(bugprone-exception-escape) { // swap only works for strings if (JSON_HEDLEY_LIKELY(is_binary())) diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index 49e6d017a1..f8f8d52b3f 100644 --- a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -7436,7 +7436,7 @@ class lexer : public lexer_base { // escape control characters std::array cs{{}}; - (std::snprintf)(cs.data(), cs.size(), "", static_cast(c)); // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-varar) + (std::snprintf)(cs.data(), cs.size(), "", static_cast(c)); // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-vararg) result += cs.data(); } else diff --git a/test/src/unit-cbor.cpp b/test/src/unit-cbor.cpp index 00aafba65b..4c8f311d43 100644 --- a/test/src/unit-cbor.cpp +++ b/test/src/unit-cbor.cpp @@ -611,7 +611,7 @@ TEST_CASE("CBOR") SECTION("-32768..-129 (int 16)") { - for (int16_t i = -32768; i <= -129; ++i) + for (int16_t i = -32768; i <= int16_t(-129); ++i) { CAPTURE(i) From 80734417323cf1d36780df49f893590530634c9e Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 22:18:34 +0100 Subject: [PATCH 075/113] :rotating_light: fix warnings --- include/nlohmann/json.hpp | 8 ++++++++ single_include/nlohmann/json.hpp | 20 ++++++++++++++------ 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp index 57cf9d5784..3624d4b296 100644 --- a/include/nlohmann/json.hpp +++ b/include/nlohmann/json.hpp @@ -6842,7 +6842,9 @@ class basic_json { auto ia = i.get(); return format == input_format_t::json + // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) ? parser(std::move(ia), nullptr, true, ignore_comments).sax_parse(sax, strict) + // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) : detail::binary_reader(std::move(ia)).sax_parse(format, sax, strict); } @@ -7528,6 +7530,7 @@ class basic_json basic_json result; detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); return res ? result : basic_json(value_t::discarded); } @@ -7667,6 +7670,7 @@ class basic_json basic_json result; detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::msgpack, &sdp, strict); return res ? 
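// A minimal sketch, over a simplified value type, of why the swap() members
// above get NOLINT(bugprone-exception-escape): clang-tidy expects a function
// named swap never to throw, but these overloads intentionally throw when the
// stored JSON type does not match, and the patch keeps that contract instead
// of widening the exception guarantee.
#include <stdexcept>
#include <vector>

class value_sketch
{
  public:
    void swap(std::vector<int>& other)  // NOLINT(bugprone-exception-escape)
    {
        if (is_array)
        {
            array.swap(other);  // cheap, non-throwing exchange
        }
        else
        {
            throw std::domain_error("cannot use swap() with this type");
        }
    }

  private:
    bool is_array = false;
    std::vector<int> array;
};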
result : basic_json(value_t::discarded); } @@ -7782,6 +7786,7 @@ class basic_json basic_json result; detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::ubjson, &sdp, strict); return res ? result : basic_json(value_t::discarded); } @@ -7895,6 +7900,7 @@ class basic_json basic_json result; detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::bson, &sdp, strict); return res ? result : basic_json(value_t::discarded); } @@ -8329,12 +8335,14 @@ class basic_json // check if desired value is present if (JSON_HEDLEY_UNLIKELY(it == val.m_value.object->end())) { + // NOLINTNEXTLINE(performance-inefficient-string-concatenation) JSON_THROW(parse_error::create(105, 0, error_msg + " must have member '" + member + "'")); } // check if result is of type string if (JSON_HEDLEY_UNLIKELY(string_type && !it->second.is_string())) { + // NOLINTNEXTLINE(performance-inefficient-string-concatenation) JSON_THROW(parse_error::create(105, 0, error_msg + " must have string member '" + member + "'")); } diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index f8f8d52b3f..c932756302 100644 --- a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -14350,7 +14350,7 @@ class binary_writer void write_number(const NumberType n) { // step 1: write number to array of length NumberType - std::array vec; + std::array vec{}; std::memcpy(vec.data(), &n, sizeof(NumberType)); // step 2: write array to output (with possible reordering) @@ -22564,7 +22564,7 @@ class basic_json @since version 1.0.0 */ - void swap(array_t& other) + void swap(array_t& other) // NOLINT(bugprone-exception-escape) { // swap only works for arrays if (JSON_HEDLEY_LIKELY(is_array())) @@ -22597,7 +22597,7 @@ class basic_json @since version 1.0.0 */ - void swap(object_t& other) + void swap(object_t& other) // NOLINT(bugprone-exception-escape) { // swap only works for objects if (JSON_HEDLEY_LIKELY(is_object())) @@ -22630,7 +22630,7 @@ class basic_json @since version 1.0.0 */ - void swap(string_t& other) + void swap(string_t& other) // NOLINT(bugprone-exception-escape) { // swap only works for strings if (JSON_HEDLEY_LIKELY(is_string())) @@ -22663,7 +22663,7 @@ class basic_json @since version 3.8.0 */ - void swap(binary_t& other) + void swap(binary_t& other) // NOLINT(bugprone-exception-escape) { // swap only works for strings if (JSON_HEDLEY_LIKELY(is_binary())) @@ -22677,7 +22677,7 @@ class basic_json } /// @copydoc swap(binary_t&) - void swap(typename binary_t::container_type& other) + void swap(typename binary_t::container_type& other) // NOLINT(bugprone-exception-escape) { // swap only works for strings if (JSON_HEDLEY_LIKELY(is_binary())) @@ -23470,7 +23470,9 @@ class basic_json { auto ia = i.get(); return format == input_format_t::json + // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) ? 
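// A minimal sketch of the NOLINTNEXTLINE(hicpp-move-const-arg,
// performance-move-const-arg) suppressions above: the adapter is a template
// parameter, so the same std::move is a real ownership transfer for owning
// adapters but a flagged no-op for trivially copyable ones; the generic code
// keeps the move and silences the pedantic check.  ReaderType here is a
// placeholder for a reader such as the binary_reader in the hunks above.
#include <utility>

template <typename InputAdapterType, typename ReaderType>
bool parse_with(InputAdapterType ia)
{
    // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg)
    ReaderType reader(std::move(ia));  // meaningful only for owning adapters
    return reader.run();
}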
parser(std::move(ia), nullptr, true, ignore_comments).sax_parse(sax, strict) + // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) : detail::binary_reader(std::move(ia)).sax_parse(format, sax, strict); } @@ -24156,6 +24158,7 @@ class basic_json basic_json result; detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); return res ? result : basic_json(value_t::discarded); } @@ -24295,6 +24298,7 @@ class basic_json basic_json result; detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::msgpack, &sdp, strict); return res ? result : basic_json(value_t::discarded); } @@ -24410,6 +24414,7 @@ class basic_json basic_json result; detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::ubjson, &sdp, strict); return res ? result : basic_json(value_t::discarded); } @@ -24523,6 +24528,7 @@ class basic_json basic_json result; detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::bson, &sdp, strict); return res ? result : basic_json(value_t::discarded); } @@ -24957,12 +24963,14 @@ class basic_json // check if desired value is present if (JSON_HEDLEY_UNLIKELY(it == val.m_value.object->end())) { + // NOLINTNEXTLINE(performance-inefficient-string-concatenation) JSON_THROW(parse_error::create(105, 0, error_msg + " must have member '" + member + "'")); } // check if result is of type string if (JSON_HEDLEY_UNLIKELY(string_type && !it->second.is_string())) { + // NOLINTNEXTLINE(performance-inefficient-string-concatenation) JSON_THROW(parse_error::create(105, 0, error_msg + " must have string member '" + member + "'")); } From eacf4f49c5cdc31245bd99f02c3d942cdb024e0a Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Thu, 28 Jan 2021 22:18:51 +0100 Subject: [PATCH 076/113] :zap: add optimization to sanitizer build --- cmake/ci.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/ci.cmake b/cmake/ci.cmake index 01cd7c75de..c34f8603f5 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -408,7 +408,7 @@ endforeach() # Sanitizers. 
############################################################################### -set(CLANG_CXX_FLAGS_SANITIZER "-g -O0 -fsanitize=address -fsanitize=undefined -fsanitize=integer -fsanitize=nullability -fno-omit-frame-pointer -fno-sanitize-recover=all -fsanitize-recover=unsigned-integer-overflow") +set(CLANG_CXX_FLAGS_SANITIZER "-g -O1 -fsanitize=address -fsanitize=undefined -fsanitize=integer -fsanitize=nullability -fno-omit-frame-pointer -fno-sanitize-recover=all -fsanitize-recover=unsigned-integer-overflow") add_custom_target(ci_test_clang_sanitizer COMMAND CXX=${CLANG_TOOL} CXXFLAGS=${CLANG_CXX_FLAGS_SANITIZER} ${CMAKE_COMMAND} From 1101f0e35969d41d710340ab24af55a78b36b21c Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Fri, 29 Jan 2021 14:42:45 +0100 Subject: [PATCH 077/113] :rotating_light: fix warnings --- .clang-tidy | 1 + .../nlohmann/detail/conversions/from_json.hpp | 2 +- .../nlohmann/detail/conversions/to_json.hpp | 10 +++---- include/nlohmann/detail/json_pointer.hpp | 2 +- .../nlohmann/detail/output/binary_writer.hpp | 2 +- include/nlohmann/json.hpp | 8 +++--- single_include/nlohmann/json.hpp | 24 ++++++++--------- test/src/unit-cbor.cpp | 26 +++++++++---------- test/src/unit-msgpack.cpp | 2 +- test/src/unit-regression1.cpp | 2 +- test/src/unit-regression2.cpp | 1 + 11 files changed, 38 insertions(+), 42 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index 11009409e8..4122a9ec19 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -14,6 +14,7 @@ Checks: '*, -fuchsia-overloaded-operator, -google-explicit-constructor, -google-readability-function-size, + -google-runtime-int, -google-runtime-references, -hicpp-avoid-goto, -hicpp-explicit-conversions, diff --git a/include/nlohmann/detail/conversions/from_json.hpp b/include/nlohmann/detail/conversions/from_json.hpp index 7803d4b24e..faf1159f72 100644 --- a/include/nlohmann/detail/conversions/from_json.hpp +++ b/include/nlohmann/detail/conversions/from_json.hpp @@ -269,7 +269,7 @@ void from_json(const BasicJsonType& j, ConstructibleObjectType& obj) } ConstructibleObjectType ret; - auto inner_object = j.template get_ptr(); + const auto* inner_object = j.template get_ptr(); using value_type = typename ConstructibleObjectType::value_type; std::transform( inner_object->begin(), inner_object->end(), diff --git a/include/nlohmann/detail/conversions/to_json.hpp b/include/nlohmann/detail/conversions/to_json.hpp index b45004fd42..bc201a088d 100644 --- a/include/nlohmann/detail/conversions/to_json.hpp +++ b/include/nlohmann/detail/conversions/to_json.hpp @@ -73,8 +73,7 @@ struct external_constructor static void construct(BasicJsonType& j, const typename BasicJsonType::binary_t& b) { j.m_type = value_t::binary; - typename BasicJsonType::binary_t value{b}; - j.m_value = value; + j.m_value = typename BasicJsonType::binary_t(b); j.assert_invariant(); } @@ -82,8 +81,7 @@ struct external_constructor static void construct(BasicJsonType& j, typename BasicJsonType::binary_t&& b) { j.m_type = value_t::binary; - typename BasicJsonType::binary_t value{std::move(b)}; - j.m_value = value; + j.m_value = typename BasicJsonType::binary_t(std::move(b));; j.assert_invariant(); } }; @@ -322,9 +320,9 @@ void to_json(BasicJsonType& j, typename BasicJsonType::object_t&& obj) template < typename BasicJsonType, typename T, std::size_t N, enable_if_t < !std::is_constructible::value, + const T(&)[N]>::value, // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) int > = 0 > -void to_json(BasicJsonType& j, const T(&arr)[N]) +void 
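// A minimal sketch of the kind of defect the ci_test_clang_sanitizer target
// above is built to catch; compile with flags equivalent to
// CLANG_CXX_FLAGS_SANITIZER, e.g.
//   clang++ -std=c++11 -g -O1 -fsanitize=address -fsanitize=undefined sketch.cpp
// Moving from -O0 to -O1 keeps the reports usable while making the
// instrumented test suite run noticeably faster.
#include <cstdio>

int main()
{
    int arr[4] = {0, 1, 2, 3};
    volatile int i = 4;            // defeat constant folding
    std::printf("%d\n", arr[i]);   // AddressSanitizer: stack-buffer-overflow
    return 0;
}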
to_json(BasicJsonType& j, const T(&arr)[N]) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) { external_constructor::construct(j, arr); } diff --git a/include/nlohmann/detail/json_pointer.hpp b/include/nlohmann/detail/json_pointer.hpp index b89a77a6d4..4702a20fbb 100644 --- a/include/nlohmann/detail/json_pointer.hpp +++ b/include/nlohmann/detail/json_pointer.hpp @@ -399,7 +399,7 @@ class json_pointer */ BasicJsonType& get_and_create(BasicJsonType& j) const { - auto result = &j; + auto* result = &j; // in case no reference tokens exist, return a reference to the JSON value // j which will be overwritten by a primitive value diff --git a/include/nlohmann/detail/output/binary_writer.hpp b/include/nlohmann/detail/output/binary_writer.hpp index c784205913..2a945a98ad 100644 --- a/include/nlohmann/detail/output/binary_writer.hpp +++ b/include/nlohmann/detail/output/binary_writer.hpp @@ -36,7 +36,7 @@ class binary_writer @param[in] adapter output adapter to write to */ - explicit binary_writer(output_adapter_t adapter) : oa(adapter) + explicit binary_writer(output_adapter_t adapter) : oa(std::move(adapter)) { JSON_ASSERT(oa); } diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp index 3624d4b296..526941cc5d 100644 --- a/include/nlohmann/json.hpp +++ b/include/nlohmann/json.hpp @@ -2937,7 +2937,7 @@ class basic_json static_assert(std::is_default_constructible::value, "types must be DefaultConstructible when used with get()"); - ValueType ret; + ValueType ret{}; JSONSerializer::from_json(*this, ret); return ret; } @@ -3044,10 +3044,10 @@ class basic_json template < typename T, std::size_t N, - typename Array = T (&)[N], + typename Array = T (&)[N], // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) detail::enable_if_t < detail::has_from_json::value, int > = 0 > - Array get_to(T (&v)[N]) const + Array get_to(T (&v)[N]) const // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) noexcept(noexcept(JSONSerializer::from_json( std::declval(), v))) { @@ -8753,7 +8753,7 @@ struct less<::nlohmann::detail::value_t> */ template<> inline void swap(nlohmann::json& j1, nlohmann::json& j2) noexcept( - is_nothrow_move_constructible::value&& + is_nothrow_move_constructible::value&& // NOLINT(misc-redundant-expression) is_nothrow_move_assignable::value ) { diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index c932756302..ec128b0fcb 100644 --- a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -3751,7 +3751,7 @@ void from_json(const BasicJsonType& j, ConstructibleObjectType& obj) } ConstructibleObjectType ret; - auto inner_object = j.template get_ptr(); + const auto* inner_object = j.template get_ptr(); using value_type = typename ConstructibleObjectType::value_type; std::transform( inner_object->begin(), inner_object->end(), @@ -4144,8 +4144,7 @@ struct external_constructor static void construct(BasicJsonType& j, const typename BasicJsonType::binary_t& b) { j.m_type = value_t::binary; - typename BasicJsonType::binary_t value{b}; - j.m_value = value; + j.m_value = typename BasicJsonType::binary_t(b); j.assert_invariant(); } @@ -4153,8 +4152,7 @@ struct external_constructor static void construct(BasicJsonType& j, typename BasicJsonType::binary_t&& b) { j.m_type = value_t::binary; - typename BasicJsonType::binary_t value{std::move(b)}; - j.m_value = value; + j.m_value = typename BasicJsonType::binary_t(std::move(b));; 
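// A minimal sketch of the binary_writer constructor change above: the
// shared_ptr-based output adapter is taken by value and moved into the
// member, making the parameter a proper sink, so passing a temporary costs
// one move instead of a reference-count bump plus copy (the pattern that
// performance-unnecessary-value-param steers toward).  Types are simplified.
#include <memory>
#include <utility>

struct adapter_protocol
{
    virtual ~adapter_protocol() = default;
};
using adapter_t = std::shared_ptr<adapter_protocol>;

class writer_sketch
{
  public:
    explicit writer_sketch(adapter_t adapter) : oa(std::move(adapter)) {}

  private:
    adapter_t oa;  // owned handle, moved in, never copied
};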
j.assert_invariant(); } }; @@ -4393,9 +4391,9 @@ void to_json(BasicJsonType& j, typename BasicJsonType::object_t&& obj) template < typename BasicJsonType, typename T, std::size_t N, enable_if_t < !std::is_constructible::value, + const T(&)[N]>::value, // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) int > = 0 > -void to_json(BasicJsonType& j, const T(&arr)[N]) +void to_json(BasicJsonType& j, const T(&arr)[N]) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) { external_constructor::construct(j, arr); } @@ -12055,7 +12053,7 @@ class json_pointer */ BasicJsonType& get_and_create(BasicJsonType& j) const { - auto result = &j; + auto* result = &j; // in case no reference tokens exist, return a reference to the JSON value // j which will be overwritten by a primitive value @@ -12877,7 +12875,7 @@ class binary_writer @param[in] adapter output adapter to write to */ - explicit binary_writer(output_adapter_t adapter) : oa(adapter) + explicit binary_writer(output_adapter_t adapter) : oa(std::move(adapter)) { JSON_ASSERT(oa); } @@ -19565,7 +19563,7 @@ class basic_json static_assert(std::is_default_constructible::value, "types must be DefaultConstructible when used with get()"); - ValueType ret; + ValueType ret{}; JSONSerializer::from_json(*this, ret); return ret; } @@ -19672,10 +19670,10 @@ class basic_json template < typename T, std::size_t N, - typename Array = T (&)[N], + typename Array = T (&)[N], // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) detail::enable_if_t < detail::has_from_json::value, int > = 0 > - Array get_to(T (&v)[N]) const + Array get_to(T (&v)[N]) const // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) noexcept(noexcept(JSONSerializer::from_json( std::declval(), v))) { @@ -25381,7 +25379,7 @@ struct less<::nlohmann::detail::value_t> */ template<> inline void swap(nlohmann::json& j1, nlohmann::json& j2) noexcept( - is_nothrow_move_constructible::value&& + is_nothrow_move_constructible::value&& // NOLINT(misc-redundant-expression) is_nothrow_move_assignable::value ) { diff --git a/test/src/unit-cbor.cpp b/test/src/unit-cbor.cpp index 4c8f311d43..f0baec98e8 100644 --- a/test/src/unit-cbor.cpp +++ b/test/src/unit-cbor.cpp @@ -2029,20 +2029,18 @@ TEST_CASE("CBOR roundtrips" * doctest::skip()) SECTION("input from flynn") { // most of these are excluded due to differences in key order (not a real problem) - auto exclude_packed = std::set - { - TEST_DATA_DIRECTORY "/json.org/1.json", - TEST_DATA_DIRECTORY "/json.org/2.json", - TEST_DATA_DIRECTORY "/json.org/3.json", - TEST_DATA_DIRECTORY "/json.org/4.json", - TEST_DATA_DIRECTORY "/json.org/5.json", - TEST_DATA_DIRECTORY "/json_testsuite/sample.json", // kills AppVeyor - TEST_DATA_DIRECTORY "/json_tests/pass1.json", - TEST_DATA_DIRECTORY "/regression/working_file.json", - TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_object.json", - TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_object_duplicated_key.json", - TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_object_long_strings.json", - }; + std::set exclude_packed; + exclude_packed.insert(TEST_DATA_DIRECTORY "/json.org/1.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/json.org/2.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/json.org/3.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/json.org/4.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/json.org/5.json"); + 
exclude_packed.insert(TEST_DATA_DIRECTORY "/json_testsuite/sample.json"); // kills AppVeyor + exclude_packed.insert(TEST_DATA_DIRECTORY "/json_tests/pass1.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/regression/working_file.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_object.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_object_duplicated_key.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_object_long_strings.json"); for (std::string filename : { diff --git a/test/src/unit-msgpack.cpp b/test/src/unit-msgpack.cpp index dde28c4b7b..87c2f9c46c 100644 --- a/test/src/unit-msgpack.cpp +++ b/test/src/unit-msgpack.cpp @@ -446,7 +446,7 @@ TEST_CASE("MessagePack") SECTION("-32768..-129 (int 16)") { - for (int16_t i = -32768; i <= -129; ++i) + for (int16_t i = -32768; i <= int16_t(-129); ++i) { CAPTURE(i) diff --git a/test/src/unit-regression1.cpp b/test/src/unit-regression1.cpp index c8a20a9cd7..ffbd184d03 100644 --- a/test/src/unit-regression1.cpp +++ b/test/src/unit-regression1.cpp @@ -115,7 +115,7 @@ using foo_json = nlohmann::basic_json Date: Fri, 29 Jan 2021 14:47:54 +0100 Subject: [PATCH 078/113] :rotating_light: add missing header --- include/nlohmann/detail/output/binary_writer.hpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/nlohmann/detail/output/binary_writer.hpp b/include/nlohmann/detail/output/binary_writer.hpp index 2a945a98ad..9985f56cce 100644 --- a/include/nlohmann/detail/output/binary_writer.hpp +++ b/include/nlohmann/detail/output/binary_writer.hpp @@ -2,11 +2,12 @@ #include // reverse #include // array +#include // isnan, isinf #include // uint8_t, uint16_t, uint32_t, uint64_t #include // memcpy #include // numeric_limits #include // string -#include // isnan, isinf +#include // move #include #include From 52aa607c87de65143f0bea2c0903f05666c7e4fc Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Fri, 29 Jan 2021 15:15:41 +0100 Subject: [PATCH 079/113] :rotating_light: fix warnings --- .../nlohmann/detail/iterators/iter_impl.hpp | 7 ++-- include/nlohmann/json.hpp | 10 +++--- single_include/nlohmann/json.hpp | 20 +++++++----- test/src/unit-msgpack.cpp | 32 +++++++++---------- test/src/unit-regression2.cpp | 2 +- test/src/unit-to_chars.cpp | 4 +-- 6 files changed, 40 insertions(+), 35 deletions(-) diff --git a/include/nlohmann/detail/iterators/iter_impl.hpp b/include/nlohmann/detail/iterators/iter_impl.hpp index 7e9a985122..5054c1507c 100644 --- a/include/nlohmann/detail/iterators/iter_impl.hpp +++ b/include/nlohmann/detail/iterators/iter_impl.hpp @@ -138,8 +138,11 @@ class iter_impl */ iter_impl& operator=(const iter_impl& other) noexcept { - m_object = other.m_object; - m_it = other.m_it; + if (&other != this) + { + m_object = other.m_object; + m_it = other.m_it; + } return *this; } diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp index 526941cc5d..2e41d7c307 100644 --- a/include/nlohmann/json.hpp +++ b/include/nlohmann/json.hpp @@ -8557,12 +8557,12 @@ class basic_json for (auto it = source.cbegin(); it != source.cend(); ++it) { // escape the key name to be used in a JSON patch - const auto key = json_pointer::escape(it.key()); + const auto path_key = path + "/" + json_pointer::escape(it.key()); if (target.find(it.key()) != target.end()) { // recursive call to compare object values at key it - auto temp_diff = diff(it.value(), target[it.key()], path + "/" + key); + auto temp_diff = diff(it.value(), 
target[it.key()], path_key); result.insert(result.end(), temp_diff.begin(), temp_diff.end()); } else @@ -8570,7 +8570,7 @@ class basic_json // found a key that is not in o -> remove it result.push_back(object( { - {"op", "remove"}, {"path", path + "/" + key} + {"op", "remove"}, {"path", path_key} })); } } @@ -8581,10 +8581,10 @@ class basic_json if (source.find(it.key()) == source.end()) { // found a key that is not in this -> add it - const auto key = json_pointer::escape(it.key()); + const auto path_key = path + "/" + json_pointer::escape(it.key()); result.push_back( { - {"op", "add"}, {"path", path + "/" + key}, + {"op", "add"}, {"path", path_key}, {"value", it.value()} }); } diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index ec128b0fcb..c3a21fd2f5 100644 --- a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -11021,8 +11021,11 @@ class iter_impl */ iter_impl& operator=(const iter_impl& other) noexcept { - m_object = other.m_object; - m_it = other.m_it; + if (&other != this) + { + m_object = other.m_object; + m_it = other.m_it; + } return *this; } @@ -12714,11 +12717,12 @@ class json_ref #include // reverse #include // array +#include // isnan, isinf #include // uint8_t, uint16_t, uint32_t, uint64_t #include // memcpy #include // numeric_limits #include // string -#include // isnan, isinf +#include // move // #include @@ -25183,12 +25187,12 @@ class basic_json for (auto it = source.cbegin(); it != source.cend(); ++it) { // escape the key name to be used in a JSON patch - const auto key = json_pointer::escape(it.key()); + const auto path_key = path + "/" + json_pointer::escape(it.key()); if (target.find(it.key()) != target.end()) { // recursive call to compare object values at key it - auto temp_diff = diff(it.value(), target[it.key()], path + "/" + key); + auto temp_diff = diff(it.value(), target[it.key()], path_key); result.insert(result.end(), temp_diff.begin(), temp_diff.end()); } else @@ -25196,7 +25200,7 @@ class basic_json // found a key that is not in o -> remove it result.push_back(object( { - {"op", "remove"}, {"path", path + "/" + key} + {"op", "remove"}, {"path", path_key} })); } } @@ -25207,10 +25211,10 @@ class basic_json if (source.find(it.key()) == source.end()) { // found a key that is not in this -> add it - const auto key = json_pointer::escape(it.key()); + const auto path_key = path + "/" + json_pointer::escape(it.key()); result.push_back( { - {"op", "add"}, {"path", path + "/" + key}, + {"op", "add"}, {"path", path_key}, {"value", it.value()} }); } diff --git a/test/src/unit-msgpack.cpp b/test/src/unit-msgpack.cpp index 87c2f9c46c..b9377dcbb4 100644 --- a/test/src/unit-msgpack.cpp +++ b/test/src/unit-msgpack.cpp @@ -1647,23 +1647,21 @@ TEST_CASE("MessagePack roundtrips" * doctest::skip()) SECTION("input from msgpack-python") { // most of these are excluded due to differences in key order (not a real problem) - auto exclude_packed = std::set - { - TEST_DATA_DIRECTORY "/json.org/1.json", - TEST_DATA_DIRECTORY "/json.org/2.json", - TEST_DATA_DIRECTORY "/json.org/3.json", - TEST_DATA_DIRECTORY "/json.org/4.json", - TEST_DATA_DIRECTORY "/json.org/5.json", - TEST_DATA_DIRECTORY "/json_testsuite/sample.json", // kills AppVeyor - TEST_DATA_DIRECTORY "/json_tests/pass1.json", - TEST_DATA_DIRECTORY "/regression/working_file.json", - TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_object.json", - TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_object_basic.json", - TEST_DATA_DIRECTORY 
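// A minimal sketch of the iter_impl copy-assignment fix above: clang-tidy
// (bugprone-unhandled-self-assignment / cert-oop54-cpp) asks copy assignment
// to tolerate `it = it;`, so the member copies are guarded by an identity
// check before returning *this.  Template parameters are illustrative.
template <typename ObjectType, typename InternalIterator>
class iter_sketch
{
  public:
    iter_sketch& operator=(const iter_sketch& other) noexcept
    {
        if (&other != this)  // self-assignment: nothing to copy
        {
            m_object = other.m_object;
            m_it = other.m_it;
        }
        return *this;
    }

  private:
    ObjectType* m_object = nullptr;
    InternalIterator m_it{};
};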
"/nst_json_testsuite/test_parsing/y_object_duplicated_key.json", - TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_object_long_strings.json", - TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_object_simple.json", - TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_object_string_unicode.json", - }; + std::set exclude_packed; + exclude_packed.insert(TEST_DATA_DIRECTORY "/json.org/1.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/json.org/2.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/json.org/3.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/json.org/4.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/json.org/5.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/json_testsuite/sample.json"); // kills AppVeyor + exclude_packed.insert(TEST_DATA_DIRECTORY "/json_tests/pass1.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/regression/working_file.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_object.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_object_basic.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_object_duplicated_key.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_object_long_strings.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_object_simple.json"); + exclude_packed.insert(TEST_DATA_DIRECTORY "/nst_json_testsuite/test_parsing/y_object_string_unicode.json"); for (std::string filename : { diff --git a/test/src/unit-regression2.cpp b/test/src/unit-regression2.cpp index 5b7ff23a06..6260176775 100644 --- a/test/src/unit-regression2.cpp +++ b/test/src/unit-regression2.cpp @@ -73,7 +73,7 @@ inline bool operator== (NonDefaultFromJsonStruct const& /*unused*/, NonDefaultFr enum class for_1647 { one, two }; -// NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays): this is a false positive +// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays): this is a false positive NLOHMANN_JSON_SERIALIZE_ENUM(for_1647, { {for_1647::one, "one"}, diff --git a/test/src/unit-to_chars.cpp b/test/src/unit-to_chars.cpp index 1a4574e0eb..c9a01fab72 100644 --- a/test/src/unit-to_chars.cpp +++ b/test/src/unit-to_chars.cpp @@ -360,7 +360,7 @@ TEST_CASE("formatting") auto check_float = [](float number, const std::string & expected) { std::array buf{}; - char* end = nlohmann::detail::to_chars(buf.data(), buf.data() + 32, number); + char* end = nlohmann::detail::to_chars(buf.data(), buf.data() + 32, number); // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-vararg) std::string actual(buf.data(), end); CHECK(actual == expected); @@ -420,7 +420,7 @@ TEST_CASE("formatting") auto check_double = [](double number, const std::string & expected) { std::array buf{}; - char* end = nlohmann::detail::to_chars(buf.data(), buf.data() + 32, number); + char* end = nlohmann::detail::to_chars(buf.data(), buf.data() + 32, number); // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-vararg) std::string actual(buf.data(), end); CHECK(actual == expected); From 37b80711238506c6de995cbc6de8d829d90c407e Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Fri, 29 Jan 2021 20:15:41 +0100 Subject: [PATCH 080/113] :rotating_light: fix warnings --- .../nlohmann/detail/conversions/to_chars.hpp | 2 +- .../nlohmann/detail/input/binary_reader.hpp | 2 +- .../nlohmann/detail/input/input_adapters.hpp | 2 +- 
include/nlohmann/detail/input/json_sax.hpp | 5 ++ include/nlohmann/detail/input/lexer.hpp | 2 +- .../nlohmann/detail/iterators/iter_impl.hpp | 4 +- .../detail/output/output_adapters.hpp | 6 ++ single_include/nlohmann/json.hpp | 23 +++-- test/src/unit-constructor2.cpp | 12 +-- test/src/unit-conversions.cpp | 2 + test/src/unit-deserialization.cpp | 4 +- test/src/unit-regression1.cpp | 4 +- test/src/unit-udt.cpp | 6 +- test/thirdparty/doctest/doctest.h | 86 +++++++++++++++---- 14 files changed, 121 insertions(+), 39 deletions(-) diff --git a/include/nlohmann/detail/conversions/to_chars.hpp b/include/nlohmann/detail/conversions/to_chars.hpp index 5b098eb8d2..e904d10fac 100644 --- a/include/nlohmann/detail/conversions/to_chars.hpp +++ b/include/nlohmann/detail/conversions/to_chars.hpp @@ -200,7 +200,7 @@ boundaries compute_boundaries(FloatType value) using bits_type = typename std::conditional::type; - const std::uint64_t bits = reinterpret_bits(value); + const auto bits = static_cast(reinterpret_bits(value)); const std::uint64_t E = bits >> (kPrecision - 1); const std::uint64_t F = bits & (kHiddenBit - 1); diff --git a/include/nlohmann/detail/input/binary_reader.hpp b/include/nlohmann/detail/input/binary_reader.hpp index 265c0bf795..09b611485f 100644 --- a/include/nlohmann/detail/input/binary_reader.hpp +++ b/include/nlohmann/detail/input/binary_reader.hpp @@ -1854,7 +1854,7 @@ class binary_reader { return false; } - result = static_cast(number); + result = static_cast(number); // NOLINT(bugprone-signed-char-misuse,cert-str34-c): number is not a char return true; } diff --git a/include/nlohmann/detail/input/input_adapters.hpp b/include/nlohmann/detail/input/input_adapters.hpp index 3edca57c89..5d0b59b57a 100644 --- a/include/nlohmann/detail/input/input_adapters.hpp +++ b/include/nlohmann/detail/input/input_adapters.hpp @@ -437,7 +437,7 @@ contiguous_bytes_input_adapter input_adapter(CharT b) } template -auto input_adapter(T (&array)[N]) -> decltype(input_adapter(array, array + N)) +auto input_adapter(T (&array)[N]) -> decltype(input_adapter(array, array + N)) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) { return input_adapter(array, array + N); } diff --git a/include/nlohmann/detail/input/json_sax.hpp b/include/nlohmann/detail/input/json_sax.hpp index c41493d6d2..851c1b8550 100644 --- a/include/nlohmann/detail/input/json_sax.hpp +++ b/include/nlohmann/detail/input/json_sax.hpp @@ -126,6 +126,11 @@ struct json_sax const std::string& last_token, const detail::exception& ex) = 0; + json_sax() = default; + json_sax(const json_sax&) = default; + json_sax(json_sax&&) noexcept = default; + json_sax& operator=(const json_sax&) = default; + json_sax& operator=(json_sax&&) noexcept = default; virtual ~json_sax() = default; }; diff --git a/include/nlohmann/detail/input/lexer.hpp b/include/nlohmann/detail/input/lexer.hpp index 8a8403a068..f822a2097f 100644 --- a/include/nlohmann/detail/input/lexer.hpp +++ b/include/nlohmann/detail/input/lexer.hpp @@ -1236,7 +1236,7 @@ class lexer : public lexer_base // we are done scanning a number) unget(); - char* endptr = nullptr; + char* endptr = nullptr; // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-vararg) errno = 0; // try to parse integers first and fall back to floats diff --git a/include/nlohmann/detail/iterators/iter_impl.hpp b/include/nlohmann/detail/iterators/iter_impl.hpp index 5054c1507c..69abab75f0 100644 --- a/include/nlohmann/detail/iterators/iter_impl.hpp +++ 
b/include/nlohmann/detail/iterators/iter_impl.hpp @@ -75,8 +75,10 @@ class iter_impl typename BasicJsonType::const_reference, typename BasicJsonType::reference>::type; - /// default constructor iter_impl() = default; + ~iter_impl() = default; + iter_impl(iter_impl&&) noexcept = default; + iter_impl& operator=(iter_impl&&) noexcept = default; /*! @brief constructor for a given JSON instance diff --git a/include/nlohmann/detail/output/output_adapters.hpp b/include/nlohmann/detail/output/output_adapters.hpp index 71ca65b92d..25886ad1ad 100644 --- a/include/nlohmann/detail/output/output_adapters.hpp +++ b/include/nlohmann/detail/output/output_adapters.hpp @@ -20,6 +20,12 @@ template struct output_adapter_protocol virtual void write_character(CharType c) = 0; virtual void write_characters(const CharType* s, std::size_t length) = 0; virtual ~output_adapter_protocol() = default; + + output_adapter_protocol() = default; + output_adapter_protocol(const output_adapter_protocol&) = default; + output_adapter_protocol(output_adapter_protocol&&) noexcept = default; + output_adapter_protocol& operator=(const output_adapter_protocol&) = default; + output_adapter_protocol& operator=(output_adapter_protocol&&) noexcept = default; }; /// a type to simplify interfaces diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index c3a21fd2f5..849f43d73a 100644 --- a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -5244,7 +5244,7 @@ contiguous_bytes_input_adapter input_adapter(CharT b) } template -auto input_adapter(T (&array)[N]) -> decltype(input_adapter(array, array + N)) +auto input_adapter(T (&array)[N]) -> decltype(input_adapter(array, array + N)) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) { return input_adapter(array, array + N); } @@ -5413,6 +5413,11 @@ struct json_sax const std::string& last_token, const detail::exception& ex) = 0; + json_sax() = default; + json_sax(const json_sax&) = default; + json_sax(json_sax&&) noexcept = default; + json_sax& operator=(const json_sax&) = default; + json_sax& operator=(json_sax&&) noexcept = default; virtual ~json_sax() = default; }; @@ -7223,7 +7228,7 @@ class lexer : public lexer_base // we are done scanning a number) unget(); - char* endptr = nullptr; + char* endptr = nullptr; // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-vararg) errno = 0; // try to parse integers first and fall back to floats @@ -9600,7 +9605,7 @@ class binary_reader { return false; } - result = static_cast(number); + result = static_cast(number); // NOLINT(bugprone-signed-char-misuse,cert-str34-c): number is not a char return true; } @@ -10958,8 +10963,10 @@ class iter_impl typename BasicJsonType::const_reference, typename BasicJsonType::reference>::type; - /// default constructor iter_impl() = default; + ~iter_impl() = default; + iter_impl(iter_impl&&) noexcept = default; + iter_impl& operator=(iter_impl&&) noexcept = default; /*! 
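// A minimal sketch of the rule-of-five additions to json_sax and
// output_adapter_protocol above: once a polymorphic interface declares a
// virtual destructor, checks of the cppcoreguidelines-special-member-functions
// kind want the remaining special members spelled out; defaulting all five
// documents that copying and moving the interface sub-object is allowed and
// trivial.  The interface below is a stand-in, not the library's.
struct sink_protocol
{
    virtual void write(char c) = 0;
    virtual ~sink_protocol() = default;

    sink_protocol() = default;
    sink_protocol(const sink_protocol&) = default;
    sink_protocol(sink_protocol&&) noexcept = default;
    sink_protocol& operator=(const sink_protocol&) = default;
    sink_protocol& operator=(sink_protocol&&) noexcept = default;
};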
@brief constructor for a given JSON instance @@ -12752,6 +12759,12 @@ template struct output_adapter_protocol virtual void write_character(CharType c) = 0; virtual void write_characters(const CharType* s, std::size_t length) = 0; virtual ~output_adapter_protocol() = default; + + output_adapter_protocol() = default; + output_adapter_protocol(const output_adapter_protocol&) = default; + output_adapter_protocol(output_adapter_protocol&&) noexcept = default; + output_adapter_protocol& operator=(const output_adapter_protocol&) = default; + output_adapter_protocol& operator=(output_adapter_protocol&&) noexcept = default; }; /// a type to simplify interfaces @@ -14657,7 +14670,7 @@ boundaries compute_boundaries(FloatType value) using bits_type = typename std::conditional::type; - const std::uint64_t bits = reinterpret_bits(value); + const auto bits = static_cast(reinterpret_bits(value)); const std::uint64_t E = bits >> (kPrecision - 1); const std::uint64_t F = bits & (kHiddenBit - 1); diff --git a/test/src/unit-constructor2.cpp b/test/src/unit-constructor2.cpp index eb3ab9207b..4ffa96aad6 100644 --- a/test/src/unit-constructor2.cpp +++ b/test/src/unit-constructor2.cpp @@ -188,20 +188,20 @@ TEST_CASE("other constructors and destructor") { SECTION("object") { - auto* j = new json {{"foo", 1}, {"bar", false}}; - delete j; + auto* j = new json {{"foo", 1}, {"bar", false}}; // NOLINT(cppcoreguidelines-owning-memory) + delete j; // NOLINT(cppcoreguidelines-owning-memory) } SECTION("array") { - auto* j = new json {"foo", 1, 1u, false, 23.42}; - delete j; + auto* j = new json {"foo", 1, 1u, false, 23.42}; // NOLINT(cppcoreguidelines-owning-memory) + delete j; // NOLINT(cppcoreguidelines-owning-memory) } SECTION("string") { - auto* j = new json("Hello world"); - delete j; + auto* j = new json("Hello world"); // NOLINT(cppcoreguidelines-owning-memory) + delete j; // NOLINT(cppcoreguidelines-owning-memory) } } } diff --git a/test/src/unit-conversions.cpp b/test/src/unit-conversions.cpp index 6117226e29..4a544ea3f5 100644 --- a/test/src/unit-conversions.cpp +++ b/test/src/unit-conversions.cpp @@ -1639,6 +1639,7 @@ TEST_CASE("value conversion") enum class cards {kreuz, pik, herz, karo}; +// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) - false positive NLOHMANN_JSON_SERIALIZE_ENUM(cards, { {cards::kreuz, "kreuz"}, @@ -1656,6 +1657,7 @@ enum TaskState TS_INVALID = -1, }; +// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) - false positive NLOHMANN_JSON_SERIALIZE_ENUM(TaskState, { {TS_INVALID, nullptr}, diff --git a/test/src/unit-deserialization.cpp b/test/src/unit-deserialization.cpp index 5ccc0d9e71..293d2418fb 100644 --- a/test/src/unit-deserialization.cpp +++ b/test/src/unit-deserialization.cpp @@ -412,7 +412,7 @@ TEST_CASE("deserialization") SECTION("from chars") { - auto* v = new uint8_t[5]; + auto* v = new uint8_t[5]; // NOLINT(cppcoreguidelines-owning-memory) v[0] = 't'; v[1] = 'r'; v[2] = 'u'; @@ -426,7 +426,7 @@ TEST_CASE("deserialization") CHECK(l.events.size() == 1); CHECK(l.events == std::vector({"boolean(true)"})); - delete[] v; + delete[] v; // NOLINT(cppcoreguidelines-owning-memory) } SECTION("from std::string") diff --git a/test/src/unit-regression1.cpp b/test/src/unit-regression1.cpp index ffbd184d03..449d9fd91b 100644 --- a/test/src/unit-regression1.cpp +++ b/test/src/unit-regression1.cpp @@ -422,7 +422,7 @@ TEST_CASE("regression tests 1") // Non-const access with key as "char *" char _ptr_key[] = 
"Key3"; // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) - char* ptr_key = &_ptr_key[0]; // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) + char* ptr_key = &_ptr_key[0]; // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-vararg) CHECK_NOTHROW(j[ptr_key] = 3); CHECK(j[ptr_key] == json(3)); @@ -735,7 +735,7 @@ TEST_CASE("regression tests 1") check_roundtrip(83623297654460.33); check_roundtrip(701466573254773.6); check_roundtrip(1369013370304513); - check_roundtrip(96963648023094720); + check_roundtrip(96963648023094720); // NOLINT(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) check_roundtrip(3.478237409280108e+17); } diff --git a/test/src/unit-udt.cpp b/test/src/unit-udt.cpp index 96398dbea1..0983b371f9 100644 --- a/test/src/unit-udt.cpp +++ b/test/src/unit-udt.cpp @@ -376,7 +376,7 @@ struct adl_serializer> } else { - opt.reset(new T(j.get())); + opt.reset(new T(j.get())); // NOLINT(cppcoreguidelines-owning-memory) } } }; @@ -407,7 +407,7 @@ TEST_CASE("adl_serializer specialization" * doctest::test_suite("udt")) json j = optPerson; CHECK(j.is_null()); - optPerson.reset(new udt::person{{42}, {"John Doe"}, udt::country::russia}); + optPerson.reset(new udt::person{{42}, {"John Doe"}, udt::country::russia}); // NOLINT(cppcoreguidelines-owning-memory) j = optPerson; CHECK_FALSE(j.is_null()); @@ -520,7 +520,7 @@ TEST_CASE("Non-copyable types" * doctest::test_suite("udt")) json j = optPerson; CHECK(j.is_null()); - optPerson.reset(new udt::person{{42}, {"John Doe"}, udt::country::russia}); + optPerson.reset(new udt::person{{42}, {"John Doe"}, udt::country::russia}); // NOLINT(cppcoreguidelines-owning-memory,modernize-make-unique) j = optPerson; CHECK_FALSE(j.is_null()); diff --git a/test/thirdparty/doctest/doctest.h b/test/thirdparty/doctest/doctest.h index ae9c4d4109..7712dd6b63 100644 --- a/test/thirdparty/doctest/doctest.h +++ b/test/thirdparty/doctest/doctest.h @@ -48,8 +48,8 @@ #define DOCTEST_VERSION_MAJOR 2 #define DOCTEST_VERSION_MINOR 4 -#define DOCTEST_VERSION_PATCH 3 -#define DOCTEST_VERSION_STR "2.4.3" +#define DOCTEST_VERSION_PATCH 4 +#define DOCTEST_VERSION_STR "2.4.4" #define DOCTEST_VERSION \ (DOCTEST_VERSION_MAJOR * 10000 + DOCTEST_VERSION_MINOR * 100 + DOCTEST_VERSION_PATCH) @@ -3718,6 +3718,7 @@ namespace detail { } bool TestCase::operator<(const TestCase& other) const { + // this will be used only to differentiate between test cases - not relevant for sorting if(m_line != other.m_line) return m_line < other.m_line; const int file_cmp = m_file.compare(other.m_file); @@ -4043,15 +4044,29 @@ namespace { struct FatalConditionHandler { static LONG CALLBACK handleException(PEXCEPTION_POINTERS ExceptionInfo) { - for(size_t i = 0; i < DOCTEST_COUNTOF(signalDefs); ++i) { - if(ExceptionInfo->ExceptionRecord->ExceptionCode == signalDefs[i].id) { - reportFatal(signalDefs[i].name); - break; + // Multiple threads may enter this filter/handler at once. We want the error message to be printed on the + // console just once no matter how many threads have crashed. 
+ static std::mutex mutex; + static bool execute = true; + { + std::lock_guard lock(mutex); + if(execute) { + bool reported = false; + for(size_t i = 0; i < DOCTEST_COUNTOF(signalDefs); ++i) { + if(ExceptionInfo->ExceptionRecord->ExceptionCode == signalDefs[i].id) { + reportFatal(signalDefs[i].name); + reported = true; + break; + } + } + if(reported == false) + reportFatal("Unhandled SEH exception caught"); + if(isDebuggerActive() && !g_cs->no_breaks) + DOCTEST_BREAK_INTO_DEBUGGER(); } + execute = false; } - // If its not an exception we care about, pass it along. - // This stops us from eating debugger breaks etc. - return EXCEPTION_CONTINUE_SEARCH; + std::exit(EXIT_FAILURE); } FatalConditionHandler() { @@ -4073,6 +4088,8 @@ namespace { original_terminate_handler = std::get_terminate(); std::set_terminate([]() noexcept { reportFatal("Terminate handler called"); + if(isDebuggerActive() && !g_cs->no_breaks) + DOCTEST_BREAK_INTO_DEBUGGER(); std::exit(EXIT_FAILURE); // explicitly exit - otherwise the SIGABRT handler may be called as well }); @@ -4083,8 +4100,29 @@ namespace { prev_sigabrt_handler = std::signal(SIGABRT, [](int signal) noexcept { if(signal == SIGABRT) { reportFatal("SIGABRT - Abort (abnormal termination) signal"); + if(isDebuggerActive() && !g_cs->no_breaks) + DOCTEST_BREAK_INTO_DEBUGGER(); + std::exit(EXIT_FAILURE); } }); + + // The following settings are taken from google test, and more + // specifically from UnitTest::Run() inside of gtest.cc + + // the user does not want to see pop-up dialogs about crashes + prev_error_mode_1 = SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT | + SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX); + // This forces the abort message to go to stderr in all circumstances. + prev_error_mode_2 = _set_error_mode(_OUT_TO_STDERR); + // In the debug version, Visual Studio pops up a separate dialog + // offering a choice to debug the aborted program - we want to disable that. + prev_abort_behavior = _set_abort_behavior(0x0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); + // In debug mode, the Windows CRT can crash with an assertion over invalid + // input (e.g. passing an invalid file descriptor). The default handling + // for these assertions is to pop up a dialog and wait for user input. + // Instead ask the CRT to dump such assertions to stderr non-interactively. 
+ prev_report_mode = _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE | _CRTDBG_MODE_DEBUG); + prev_report_file = _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR); } static void reset() { @@ -4092,16 +4130,25 @@ namespace { // Unregister handler and restore the old guarantee SetUnhandledExceptionFilter(previousTop); SetThreadStackGuarantee(&guaranteeSize); - previousTop = nullptr; - isSet = false; std::set_terminate(original_terminate_handler); std::signal(SIGABRT, prev_sigabrt_handler); + SetErrorMode(prev_error_mode_1); + _set_error_mode(prev_error_mode_2); + _set_abort_behavior(prev_abort_behavior, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); + _CrtSetReportMode(_CRT_ASSERT, prev_report_mode); + _CrtSetReportFile(_CRT_ASSERT, prev_report_file); + isSet = false; } } ~FatalConditionHandler() { reset(); } private: + static UINT prev_error_mode_1; + static int prev_error_mode_2; + static unsigned int prev_abort_behavior; + static int prev_report_mode; + static _HFILE prev_report_file; static void (*prev_sigabrt_handler)(int); static std::terminate_handler original_terminate_handler; static bool isSet; @@ -4109,6 +4156,11 @@ namespace { static LPTOP_LEVEL_EXCEPTION_FILTER previousTop; }; + UINT FatalConditionHandler::prev_error_mode_1; + int FatalConditionHandler::prev_error_mode_2; + unsigned int FatalConditionHandler::prev_abort_behavior; + int FatalConditionHandler::prev_report_mode; + _HFILE FatalConditionHandler::prev_report_file; void (*FatalConditionHandler::prev_sigabrt_handler)(int); std::terminate_handler FatalConditionHandler::original_terminate_handler; bool FatalConditionHandler::isSet = false; @@ -5046,7 +5098,6 @@ namespace { struct JUnitTestCaseData { -DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wdeprecated-declarations") // gmtime static std::string getCurrentTimestamp() { // Beware, this is not reentrant because of backward compatibility issues // Also, UTC only, again because of backward compatibility (%z is C++11) @@ -5054,16 +5105,19 @@ DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wdeprecated-declarations") // gmtime std::time(&rawtime); auto const timeStampSize = sizeof("2017-01-16T17:06:45Z"); - std::tm* timeInfo; - timeInfo = std::gmtime(&rawtime); + std::tm timeInfo; +#ifdef DOCTEST_PLATFORM_WINDOWS + gmtime_s(&timeInfo, &rawtime); +#else // DOCTEST_PLATFORM_WINDOWS + gmtime_r(&rawtime, &timeInfo); +#endif // DOCTEST_PLATFORM_WINDOWS char timeStamp[timeStampSize]; const char* const fmt = "%Y-%m-%dT%H:%M:%SZ"; - std::strftime(timeStamp, timeStampSize, fmt, timeInfo); + std::strftime(timeStamp, timeStampSize, fmt, &timeInfo); return std::string(timeStamp); } -DOCTEST_CLANG_SUPPRESS_WARNING_POP struct JUnitTestMessage { From 0b7af878e48f4435548f8a4da618ff7fe5d4462d Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Fri, 29 Jan 2021 22:57:52 +0100 Subject: [PATCH 081/113] :rotating_light: fix warnings --- include/nlohmann/detail/input/json_sax.hpp | 8 ++++---- include/nlohmann/detail/iterators/iter_impl.hpp | 2 +- .../nlohmann/detail/iterators/iteration_proxy.hpp | 2 +- single_include/nlohmann/json.hpp | 12 ++++++------ 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/include/nlohmann/detail/input/json_sax.hpp b/include/nlohmann/detail/input/json_sax.hpp index 851c1b8550..57c7d72c9c 100644 --- a/include/nlohmann/detail/input/json_sax.hpp +++ b/include/nlohmann/detail/input/json_sax.hpp @@ -171,9 +171,9 @@ class json_sax_dom_parser // make class move-only json_sax_dom_parser(const json_sax_dom_parser&) = delete; - json_sax_dom_parser(json_sax_dom_parser&&) = 
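// A minimal sketch of the JUnit timestamp fix above: std::gmtime returns a
// pointer into shared static storage and is therefore not reentrant, so the
// reporter now fills a caller-owned std::tm through gmtime_s (Windows) or
// gmtime_r (POSIX) and formats from that copy.
#include <ctime>
#include <string>

std::string utc_timestamp()
{
    std::time_t rawtime = 0;
    std::time(&rawtime);

    std::tm time_info{};
#ifdef _WIN32
    gmtime_s(&time_info, &rawtime);
#else
    gmtime_r(&rawtime, &time_info);
#endif

    char buf[sizeof("2017-01-16T17:06:45Z")] = {};
    std::strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%SZ", &time_info);
    return std::string(buf);
}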
default; + json_sax_dom_parser(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete; - json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; + json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) ~json_sax_dom_parser() = default; bool null() @@ -346,9 +346,9 @@ class json_sax_dom_callback_parser // make class move-only json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; + json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; + json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) ~json_sax_dom_callback_parser() = default; bool null() diff --git a/include/nlohmann/detail/iterators/iter_impl.hpp b/include/nlohmann/detail/iterators/iter_impl.hpp index 69abab75f0..869fffa95b 100644 --- a/include/nlohmann/detail/iterators/iter_impl.hpp +++ b/include/nlohmann/detail/iterators/iter_impl.hpp @@ -163,7 +163,7 @@ class iter_impl @return const/non-const iterator @note It is not checked whether @a other is initialized. */ - iter_impl& operator=(const iter_impl::type>& other) noexcept + iter_impl& operator=(const iter_impl::type>& other) noexcept // NOLINT(cert-oop54-cpp) { m_object = other.m_object; m_it = other.m_it; diff --git a/include/nlohmann/detail/iterators/iteration_proxy.hpp b/include/nlohmann/detail/iterators/iteration_proxy.hpp index 1b47faeb3e..e911042204 100644 --- a/include/nlohmann/detail/iterators/iteration_proxy.hpp +++ b/include/nlohmann/detail/iterators/iteration_proxy.hpp @@ -42,7 +42,7 @@ template class iteration_proxy_value const string_type empty_str{}; public: - explicit iteration_proxy_value(IteratorType it) noexcept : anchor(it) {} + explicit iteration_proxy_value(IteratorType it) noexcept : anchor(std::move(it)) {} /// dereference operator (needed for range-based for) iteration_proxy_value& operator*() diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index 849f43d73a..7a8b9b533b 100644 --- a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -3943,7 +3943,7 @@ template class iteration_proxy_value const string_type empty_str{}; public: - explicit iteration_proxy_value(IteratorType it) noexcept : anchor(it) {} + explicit iteration_proxy_value(IteratorType it) noexcept : anchor(std::move(it)) {} /// dereference operator (needed for range-based for) iteration_proxy_value& operator*() @@ -5458,9 +5458,9 @@ class json_sax_dom_parser // make class move-only json_sax_dom_parser(const json_sax_dom_parser&) = delete; - json_sax_dom_parser(json_sax_dom_parser&&) = default; + json_sax_dom_parser(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete; - json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; + json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) 
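// A minimal sketch of the move-only SAX parser pattern annotated above:
// copies are deleted because the parser owns in-progress parse state, the
// moves stay defaulted, and since the defaulted moves are not declared
// noexcept the hicpp-noexcept-move / performance-noexcept-move-constructor
// checks are silenced rather than promising a guarantee the members may not
// give.  The class below is a stand-in for json_sax_dom_parser.
#include <string>
#include <vector>

class dom_builder_sketch
{
  public:
    explicit dom_builder_sketch(std::vector<std::string>& events) : events_(&events) {}

    dom_builder_sketch(const dom_builder_sketch&) = delete;
    dom_builder_sketch(dom_builder_sketch&&) = default;  // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
    dom_builder_sketch& operator=(const dom_builder_sketch&) = delete;
    dom_builder_sketch& operator=(dom_builder_sketch&&) = default;  // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
    ~dom_builder_sketch() = default;

    bool null()
    {
        events_->push_back("null()");
        return true;
    }

  private:
    std::vector<std::string>* events_;
};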
~json_sax_dom_parser() = default; bool null() @@ -5633,9 +5633,9 @@ class json_sax_dom_callback_parser // make class move-only json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; + json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; + json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) ~json_sax_dom_callback_parser() = default; bool null() @@ -11051,7 +11051,7 @@ class iter_impl @return const/non-const iterator @note It is not checked whether @a other is initialized. */ - iter_impl& operator=(const iter_impl::type>& other) noexcept + iter_impl& operator=(const iter_impl::type>& other) noexcept // NOLINT(cert-oop54-cpp) { m_object = other.m_object; m_it = other.m_it; From 24653023105b31658f4476a5ae822cb3a50a20c1 Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Fri, 29 Jan 2021 23:00:30 +0100 Subject: [PATCH 082/113] :rotating_light: fix warnings --- include/nlohmann/detail/iterators/iteration_proxy.hpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/include/nlohmann/detail/iterators/iteration_proxy.hpp b/include/nlohmann/detail/iterators/iteration_proxy.hpp index e911042204..d59098d95d 100644 --- a/include/nlohmann/detail/iterators/iteration_proxy.hpp +++ b/include/nlohmann/detail/iterators/iteration_proxy.hpp @@ -4,6 +4,7 @@ #include // input_iterator_tag #include // string, to_string #include // tuple_size, get, tuple_element +#include // move #include #include @@ -42,7 +43,9 @@ template class iteration_proxy_value const string_type empty_str{}; public: - explicit iteration_proxy_value(IteratorType it) noexcept : anchor(std::move(it)) {} + explicit iteration_proxy_value(IteratorType it) noexcept + : anchor(std::move(it)) + {} /// dereference operator (needed for range-based for) iteration_proxy_value& operator*() From 7b3ba241ab3cfc77ac736dd6551e6c5f6d545adc Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Fri, 29 Jan 2021 23:00:44 +0100 Subject: [PATCH 083/113] :rotating_light: fix warnings --- single_include/nlohmann/json.hpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp index 7a8b9b533b..cc9f2d6c0f 100644 --- a/single_include/nlohmann/json.hpp +++ b/single_include/nlohmann/json.hpp @@ -3903,6 +3903,7 @@ constexpr const auto& from_json = detail::static_const::va #include // input_iterator_tag #include // string, to_string #include // tuple_size, get, tuple_element +#include // move // #include @@ -3943,7 +3944,9 @@ template class iteration_proxy_value const string_type empty_str{}; public: - explicit iteration_proxy_value(IteratorType it) noexcept : anchor(std::move(it)) {} + explicit iteration_proxy_value(IteratorType it) noexcept + : anchor(std::move(it)) + {} /// dereference operator (needed for range-based for) iteration_proxy_value& operator*() From b66ebcc950133ee80572e36f01078f36c52a507a Mon Sep 17 00:00:00 2001 From: Niels Lohmann Date: Sat, 30 Jan 2021 12:50:36 +0100 Subject: [PATCH 084/113] :rotating_light: fix warnings --- README.md | 4 ++-- include/nlohmann/detail/conversions/from_json.hpp | 4 ++-- 
From b66ebcc950133ee80572e36f01078f36c52a507a Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sat, 30 Jan 2021 12:50:36 +0100
Subject: [PATCH 084/113] :rotating_light: fix warnings

---
 README.md                                         |  4 ++--
 include/nlohmann/detail/conversions/from_json.hpp |  4 ++--
 include/nlohmann/detail/conversions/to_json.hpp   |  6 ++++--
 single_include/nlohmann/json.hpp                  | 10 ++++++----
 test/src/unit-readme.cpp                          | 10 +++++-----
 5 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/README.md b/README.md
index 076737245e..328d4eb20b 100644
--- a/README.md
+++ b/README.md
@@ -365,7 +365,7 @@ The above example can also be expressed explicitly using [`json::parse()`](https
 
 ```cpp
 // parse explicitly
-auto j3 = json::parse("{ \"happy\": true, \"pi\": 3.141 }");
+auto j3 = json::parse(R"({"happy": true, "pi": 3.141})");
 ```
 
 You can also get a string representation of a JSON value (serialize):
@@ -577,7 +577,7 @@ j[1] = 42;
 bool foo = j.at(2);
 
 // comparison
-j == "[\"foo\", 42, true]"_json; // true
+j == R"(["foo", 1, true])"_json; // true
 
 // other stuff
 j.size();     // 3 entries
diff --git a/include/nlohmann/detail/conversions/from_json.hpp b/include/nlohmann/detail/conversions/from_json.hpp
index faf1159f72..77b87036af 100644
--- a/include/nlohmann/detail/conversions/from_json.hpp
+++ b/include/nlohmann/detail/conversions/from_json.hpp
@@ -396,8 +396,8 @@ struct from_json_fn
 /// namespace to hold default `from_json` function
 /// to see why this is required:
 /// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4381.html
-namespace
+namespace // NOLINT(cert-dcl59-cpp,fuchsia-header-anon-namespaces,google-build-namespaces)
 {
-constexpr const auto& from_json = detail::static_const<detail::from_json_fn>::value;
+constexpr const auto& from_json = detail::static_const<detail::from_json_fn>::value; // NOLINT(misc-definitions-in-headers)
 } // namespace
 } // namespace nlohmann
diff --git a/include/nlohmann/detail/conversions/to_json.hpp b/include/nlohmann/detail/conversions/to_json.hpp
index bc201a088d..f5e4a1d3a5 100644
--- a/include/nlohmann/detail/conversions/to_json.hpp
+++ b/include/nlohmann/detail/conversions/to_json.hpp
@@ -365,8 +365,10 @@ struct to_json_fn
 } // namespace detail
 
 /// namespace to hold default `to_json` function
-namespace
+/// to see why this is required:
+/// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4381.html
+namespace // NOLINT(cert-dcl59-cpp,fuchsia-header-anon-namespaces,google-build-namespaces)
 {
-constexpr const auto& to_json = detail::static_const<detail::to_json_fn>::value;
+constexpr const auto& to_json = detail::static_const<detail::to_json_fn>::value; // NOLINT(misc-definitions-in-headers)
 } // namespace
 } // namespace nlohmann
diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp
index cc9f2d6c0f..0962729122 100644
--- a/single_include/nlohmann/json.hpp
+++ b/single_include/nlohmann/json.hpp
@@ -3878,9 +3878,9 @@ struct from_json_fn
 /// namespace to hold default `from_json` function
 /// to see why this is required:
 /// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4381.html
-namespace
+namespace // NOLINT(cert-dcl59-cpp,fuchsia-header-anon-namespaces,google-build-namespaces)
 {
-constexpr const auto& from_json = detail::static_const<detail::from_json_fn>::value;
+constexpr const auto& from_json = detail::static_const<detail::from_json_fn>::value; // NOLINT(misc-definitions-in-headers)
 } // namespace
 } // namespace nlohmann
 
@@ -4439,9 +4439,11 @@ struct to_json_fn
 } // namespace detail
 
 /// namespace to hold default `to_json` function
-namespace
+/// to see why this is required:
+/// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4381.html
+namespace // NOLINT(cert-dcl59-cpp,fuchsia-header-anon-namespaces,google-build-namespaces)
 {
-constexpr const auto& to_json = detail::static_const<detail::to_json_fn>::value;
+constexpr const auto& to_json = detail::static_const<detail::to_json_fn>::value; // NOLINT(misc-definitions-in-headers)
 } // namespace
 } // namespace nlohmann
diff --git a/test/src/unit-readme.cpp b/test/src/unit-readme.cpp
index 3bf7339484..07d3efc1b0 100644
--- a/test/src/unit-readme.cpp
+++ b/test/src/unit-readme.cpp
@@ -123,7 +123,7 @@ TEST_CASE("README" * doctest::skip())
     {
         // create object from string literal
-        json j = "{ \"happy\": true, \"pi\": 3.141 }"_json;
+        json j = "{ \"happy\": true, \"pi\": 3.141 }"_json; // NOLINT(modernize-raw-string-literal)
 
         // or even nicer with a raw string literal
         auto j2 = R"(
@@ -134,7 +134,7 @@ TEST_CASE("README" * doctest::skip())
         )"_json;
 
         // or explicitly
-        auto j3 = json::parse("{ \"happy\": true, \"pi\": 3.141 }");
+        auto j3 = json::parse(R"({"happy": true, "pi": 3.141})");
 
         // explicit conversion to string
         std::string s = j.dump();    // {\"happy\":true,\"pi\":3.141}
@@ -158,17 +158,17 @@ TEST_CASE("README" * doctest::skip())
         j.push_back(true);
 
         // comparison
-        bool x = (j == "[\"foo\", 1, true]"_json); // true
+        bool x = (j == R"(["foo", 1, true])"); // true
         CHECK(x == true);
 
         // iterate the array
-        for (json::iterator it = j.begin(); it != j.end(); ++it)
+        for (json::iterator it = j.begin(); it != j.end(); ++it) // NOLINT(modernize-loop-convert)
         {
             std::cout << *it << '\n';
         }
 
         // range-based for
-        for (auto element : j)
+        for (auto& element : j)
         {
             std::cout << element << '\n';
         }
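The anonymous-namespace `static_const` construct that keeps picking up NOLINTs here is the ODR-safe customization-point idiom from the N4381 paper linked in the comments: the header-defined global is a reference into a class template, so every translation unit sees the same entity. A self-contained sketch under invented names (`lib`, `print_fn`); only the idiom itself mirrors the library:

```cpp
#include <iostream>

namespace lib
{
namespace detail
{
// One instance of `value` exists per T across all translation units,
// even though this lives entirely in a header (N4381).
template<typename T>
struct static_const
{
    static constexpr T value{};
};

template<typename T>
constexpr T static_const<T>::value; // out-of-class definition (pre-C++17)

struct print_fn
{
    template<typename Arg>
    void operator()(Arg&& arg) const
    {
        std::cout << arg << '\n';
    }
};
} // namespace detail

namespace // NOLINT(cert-dcl59-cpp,google-build-namespaces): same rationale as the patch above
{
constexpr const auto& print = detail::static_const<detail::print_fn>::value; // NOLINT(misc-definitions-in-headers)
} // namespace
} // namespace lib

int main()
{
    lib::print(42); // one entity, no ODR violation, no ADL surprises
}
```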
From 4ba60702f464eaf07c29983ce352a13d4934af37 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sat, 30 Jan 2021 13:21:59 +0100
Subject: [PATCH 085/113] :rotating_light: fix warnings

---
 include/nlohmann/json.hpp        | 4 ++--
 single_include/nlohmann/json.hpp | 4 ++--
 test/src/unit-readme.cpp         | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp
index 2e41d7c307..23169faefe 100644
--- a/include/nlohmann/json.hpp
+++ b/include/nlohmann/json.hpp
@@ -170,7 +170,7 @@ Format](http://rfc7159.net/rfc7159)
 @nosubgrouping
 */
 NLOHMANN_BASIC_JSON_TPL_DECLARATION
-class basic_json
+class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-special-member-functions)
 {
   private:
     template<detail::value_t> friend struct detail::external_constructor;
@@ -1444,7 +1444,7 @@ class basic_json
               typename U = detail::uncvref_t<CompatibleType>,
               detail::enable_if_t <
                   !detail::is_basic_json<U>::value && detail::is_compatible_type<basic_json_t, U>::value, int > = 0 >
-    basic_json(CompatibleType && val) noexcept(noexcept(
+    basic_json(CompatibleType && val) noexcept(noexcept( // NOLINT(bugprone-forwarding-reference-overload)
                 JSONSerializer<U>::to_json(std::declval<basic_json_t&>(),
                                            std::forward<CompatibleType>(val))))
     {
diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp
index 0962729122..072927be46 100644
--- a/single_include/nlohmann/json.hpp
+++ b/single_include/nlohmann/json.hpp
@@ -16818,7 +16818,7 @@ Format](http://rfc7159.net/rfc7159)
 @nosubgrouping
 */
 NLOHMANN_BASIC_JSON_TPL_DECLARATION
-class basic_json
+class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-special-member-functions)
 {
   private:
     template<detail::value_t> friend struct detail::external_constructor;
@@ -18092,7 +18092,7 @@ class basic_json
               typename U = detail::uncvref_t<CompatibleType>,
               detail::enable_if_t <
                   !detail::is_basic_json<U>::value && detail::is_compatible_type<basic_json_t, U>::value, int > = 0 >
-    basic_json(CompatibleType && val) noexcept(noexcept(
+    basic_json(CompatibleType && val) noexcept(noexcept( // NOLINT(bugprone-forwarding-reference-overload)
                 JSONSerializer<U>::to_json(std::declval<basic_json_t&>(),
                                            std::forward<CompatibleType>(val))))
     {
diff --git a/test/src/unit-readme.cpp b/test/src/unit-readme.cpp
index 07d3efc1b0..7db7b4e6bd 100644
--- a/test/src/unit-readme.cpp
+++ b/test/src/unit-readme.cpp
@@ -158,7 +158,7 @@ TEST_CASE("README" * doctest::skip())
         j.push_back(true);
 
         // comparison
-        bool x = (j == R"(["foo", 1, true])"); // true
+        bool x = (j == R"(["foo", 1, true])"_json); // true
         CHECK(x == true);
 
         // iterate the array
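The bugprone-forwarding-reference-overload suppression above is worth unpacking: a constructor taking `T&&` binds to *everything*, including non-const lvalues of the class itself, and can therefore out-compete the copy constructor. The library constrains it with `enable_if`, which is exactly why the warning is a false positive there. A sketch with an invented `any_like` class, assuming nothing beyond the standard library:

```cpp
#include <string>
#include <type_traits>
#include <utility>

class any_like
{
  public:
    // Constrained forwarding-reference constructor: enable_if removes it
    // from overload resolution when the argument is an any_like, so the
    // copy constructor still wins for copies.
    template < typename CompatibleType,
               typename U = typename std::decay<CompatibleType>::type,
               typename std::enable_if <
                   !std::is_same<U, any_like>::value, int >::type = 0 >
    any_like(CompatibleType&& val) // NOLINT(bugprone-forwarding-reference-overload)
        : payload(std::forward<CompatibleType>(val))
    {}

    any_like(const any_like&) = default;

  private:
    std::string payload;
};

int main()
{
    any_like a{std::string("hello")};
    any_like b{a}; // picks the copy constructor thanks to the constraint
}
```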
From 9b1d4691affb67a49dac47102be1fcde9e8e2fc6 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sat, 30 Jan 2021 14:09:04 +0100
Subject: [PATCH 086/113] :construction_worker: add CI step for coverage

---
 .github/workflows/ubuntu.yml | 10 ++++++++++
 cmake/ci.cmake               | 31 +++++++++++++++++++++++++++++++
 2 files changed, 41 insertions(+)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index d2dedeef9f..74423010f8 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -117,3 +117,13 @@ jobs:
         run: cmake -S . -B build -DJSON_CI=On
       - name: build
         run: cmake --build build --target ci_test_clang_sanitizer
+
+  ci_test_coverage:
+    runs-on: ubuntu-latest
+    container: nlohmann/json-ci:latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: cmake
+        run: cmake -S . -B build -DJSON_CI=On
+      - name: build
+        run: cmake --build build --target ci_test_coverage
diff --git a/cmake/ci.cmake b/cmake/ci.cmake
index c34f8603f5..dfad0c22a2 100644
--- a/cmake/ci.cmake
+++ b/cmake/ci.cmake
@@ -30,6 +30,11 @@ execute_process(COMMAND ${GCC_TOOL} --version OUTPUT_VARIABLE GCC_TOOL_VERSION E
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" GCC_TOOL_VERSION "${GCC_TOOL_VERSION}")
 message(STATUS "πŸ”– GCC ${GCC_TOOL_VERSION} (${GCC_TOOL})")
 
+find_program(GCOV_TOOL NAMES gcov-HEAD gcov-11 gcov-10 gcov)
+execute_process(COMMAND ${GCOV_TOOL} --version OUTPUT_VARIABLE GCOV_TOOL_VERSION ERROR_VARIABLE GCOV_TOOL_VERSION)
+string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" GCOV_TOOL_VERSION "${GCOV_TOOL_VERSION}")
+message(STATUS "πŸ”– GCOV ${GCOV_TOOL_VERSION} (${GCOV_TOOL})")
+
 find_program(GIT_TOOL NAMES git)
 execute_process(COMMAND ${GIT_TOOL} --version OUTPUT_VARIABLE GIT_TOOL_VERSION ERROR_VARIABLE GIT_TOOL_VERSION)
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" GIT_TOOL_VERSION "${GIT_TOOL_VERSION}")
@@ -45,6 +50,11 @@ execute_process(COMMAND ${INFER_TOOL} --version OUTPUT_VARIABLE INFER_TOOL_VERSI
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" INFER_TOOL_VERSION "${INFER_TOOL_VERSION}")
 message(STATUS "πŸ”– Infer ${INFER_TOOL_VERSION} (${INFER_TOOL})")
 
+find_program(LCOV_TOOL NAMES lcov)
+execute_process(COMMAND ${LCOV_TOOL} --version OUTPUT_VARIABLE LCOV_TOOL_VERSION ERROR_VARIABLE LCOV_TOOL_VERSION)
+string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" LCOV_TOOL_VERSION "${LCOV_TOOL_VERSION}")
+message(STATUS "πŸ”– LCOV ${LCOV_TOOL_VERSION} (${LCOV_TOOL})")
+
 find_program(NINJA_TOOL NAMES ninja)
 execute_process(COMMAND ${NINJA_TOOL} --version OUTPUT_VARIABLE NINJA_TOOL_VERSION ERROR_VARIABLE NINJA_TOOL_VERSION)
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" NINJA_TOOL_VERSION "${NINJA_TOOL_VERSION}")
@@ -61,6 +71,7 @@ execute_process(COMMAND ${VALGRIND_TOOL} --version OUTPUT_VARIABLE VALGRIND_TOOL
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" VALGRIND_TOOL_VERSION "${VALGRIND_TOOL_VERSION}")
 message(STATUS "πŸ”– Valgrind ${VALGRIND_TOOL_VERSION} (${VALGRIND_TOOL})")
 
+find_program(GENHTML_TOOL NAMES genhtml)
 find_program(PLOG_CONVERTER_TOOL NAMES plog-converter)
 find_program(PVS_STUDIO_ANALYZER_TOOL NAMES pvs-studio-analyzer)
 find_program(SCAN_BUILD_TOOL NAMES scan-build-11 scan-build)
@@ -404,6 +415,26 @@ foreach(CXX_STANDARD 11 14 17 20)
     )
 endforeach()
 
+###############################################################################
+# Coverage.
+###############################################################################
+
+add_custom_target(ci_test_coverage
+    COMMAND CXX=${GCC_TOOL} ${CMAKE_COMMAND}
+        -DCMAKE_BUILD_TYPE=Debug -GNinja -DCMAKE_CXX_FLAGS="--coverage;-fprofile-arcs;-ftest-coverage"
+        -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON
+        -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_coverage
+    COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_coverage
+    COMMAND cd ${PROJECT_BINARY_DIR}/build_coverage && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure
+
+    COMMAND ${LCOV_TOOL} --directory . --capture --output-file json.info --rc lcov_branch_coverage=1
+    COMMAND ${LCOV_TOOL} -e json.info ${SRC_FILES} --output-file json.info.filtered --gcov-tool ${GCOV_TOOL} --rc lcov_branch_coverage=1
+    COMMAND ${CMAKE_SOURCE_DIR}/test/thirdparty/imapdl/filterbr.py json.info.filtered > json.info.filtered.noexcept
+    COMMAND genhtml --title "JSON for Modern C++" --legend --demangle-cpp --output-directory html --show-details --branch-coverage json.info.filtered.noexcept
+
+    COMMENT "Compile and test with coverage"
+)
+
 ###############################################################################
 # Sanitizers.
 ###############################################################################

From 6413f31d8e0287d47332ccc52bcde236307e6487 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 31 Jan 2021 12:41:16 +0100
Subject: [PATCH 087/113] :construction_worker: add CI step for coverage

---
 .clang-tidy                             | 3 ++-
 .github/workflows/ubuntu.yml            | 7 ++++++-
 include/nlohmann/detail/input/lexer.hpp | 4 ++--
 single_include/nlohmann/json.hpp        | 4 ++--
 4 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/.clang-tidy b/.clang-tidy
index 4122a9ec19..9c1e74231c 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -38,4 +38,5 @@ CheckOptions:
   - key: hicpp-special-member-functions.AllowSoleDefaultDtor
     value: 1
 
-HeaderFilterRegex: '.*nlohmann.*'
+#HeaderFilterRegex: '.*nlohmann.*'
+HeaderFilterRegex: '^(.*doctest.h)$'
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 74423010f8..8176723962 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -106,7 +106,7 @@ jobs:
       - name: cmake
         run: cmake -S . -B build -DJSON_CI=On
       - name: build
-        run: cmake --build build --target ci_clang_tidy || true
+        run: cmake --build build --target ci_clang_tidy
 
   ci_test_clang_sanitizer:
     runs-on: ubuntu-latest
@@ -127,3 +127,8 @@ jobs:
         run: cmake -S . -B build -DJSON_CI=On
       - name: build
         run: cmake --build build --target ci_test_coverage
+      - name: archive coverage report
+        uses: actions/upload-artifact@v2
+        with:
+          name: code-coverage-report
+          path: build_coverage/html
diff --git a/include/nlohmann/detail/input/lexer.hpp b/include/nlohmann/detail/input/lexer.hpp
index f822a2097f..7db2722619 100644
--- a/include/nlohmann/detail/input/lexer.hpp
+++ b/include/nlohmann/detail/input/lexer.hpp
@@ -120,9 +120,9 @@ class lexer : public lexer_base<BasicJsonType>
 
     // delete because of pointer members
     lexer(const lexer&) = delete;
-    lexer(lexer&&) noexcept = default;
+    lexer(lexer&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
     lexer& operator=(lexer&) = delete;
-    lexer& operator=(lexer&&) noexcept = default;
+    lexer& operator=(lexer&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
     ~lexer() = default;
 
   private:
diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp
index 072927be46..e59d8d6039 100644
--- a/single_include/nlohmann/json.hpp
+++ b/single_include/nlohmann/json.hpp
@@ -6117,9 +6117,9 @@ class lexer : public lexer_base<BasicJsonType>
 
     // delete because of pointer members
     lexer(const lexer&) = delete;
-    lexer(lexer&&) noexcept = default;
+    lexer(lexer&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
     lexer& operator=(lexer&) = delete;
-    lexer& operator=(lexer&&) noexcept = default;
+    lexer& operator=(lexer&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
     ~lexer() = default;
 
  private:

From 143cc06a777872ef18ade1a06249b7f67344df1d Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 31 Jan 2021 13:06:20 +0100
Subject: [PATCH 088/113] :construction_worker: add CI step for coverage

---
 .clang-tidy                  | 4 +++-
 .github/workflows/ubuntu.yml | 7 ++++++-
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/.clang-tidy b/.clang-tidy
index 9c1e74231c..f2c3593c46 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -38,5 +38,7 @@ CheckOptions:
   - key: hicpp-special-member-functions.AllowSoleDefaultDtor
     value: 1
 
+WarningsAsErrors: '*'
+
 #HeaderFilterRegex: '.*nlohmann.*'
-HeaderFilterRegex: '^(.*doctest.h)$'
+HeaderFilterRegex: 'include/.*'
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 8176723962..c7bc8c306c 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -131,4 +131,9 @@ jobs:
         uses: actions/upload-artifact@v2
         with:
           name: code-coverage-report
-          path: build_coverage/html
+          path: build/build_coverage/html
+      - name: Coveralls
+        uses: coverallsapp/github-action@master
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          path-to-lcov: build/build_coverage/json.info

From 0f43ba9423eeaa6aa37c9952651de5cea61b1e4f Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 31 Jan 2021 13:21:05 +0100
Subject: [PATCH 089/113] :construction_worker: add CI step for coverage

---
 .clang-tidy | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.clang-tidy b/.clang-tidy
index f2c3593c46..179dbb9944 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -41,4 +41,4 @@ CheckOptions:
 WarningsAsErrors: '*'
 
 #HeaderFilterRegex: '.*nlohmann.*'
-HeaderFilterRegex: 'include/.*'
+HeaderFilterRegex: '^.*hpp$'
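For context on what the coverage target measures: building with `--coverage` makes gcc emit instrumentation notes (.gcno) and, at test run time, counters (.gcda); `lcov --capture` folds them into json.info, and `genhtml` renders the report. With `lcov_branch_coverage=1` each conditional is reported branch by branch. A generic illustration, not a library test:

```cpp
#include <cassert>

// Each `if` below contributes two branch outcomes to the lcov report;
// all three asserts together are needed for 100% branch coverage.
int clamp_to_byte(int value)
{
    if (value < 0)      // only a negative input covers this branch
    {
        return 0;
    }
    if (value > 255)    // only an out-of-range input covers this branch
    {
        return 255;
    }
    return value;       // covered by an in-range input
}

int main()
{
    assert(clamp_to_byte(-5) == 0);
    assert(clamp_to_byte(300) == 255);
    assert(clamp_to_byte(42) == 42);
}
```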
From 6738fbda9412a36fe289d7b458f2a88b0ef08e4f Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 31 Jan 2021 13:49:27 +0100
Subject: [PATCH 090/113] :construction_worker: add CI step for coverage

---
 .clang-tidy                  | 2 +-
 .github/workflows/ubuntu.yml | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.clang-tidy b/.clang-tidy
index 179dbb9944..610b228399 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -41,4 +41,4 @@ CheckOptions:
 WarningsAsErrors: '*'
 
 #HeaderFilterRegex: '.*nlohmann.*'
-HeaderFilterRegex: '^.*hpp$'
+HeaderFilterRegex: '.*\.hpp$'
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index c7bc8c306c..438cf4200f 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -131,9 +131,9 @@ jobs:
         uses: actions/upload-artifact@v2
         with:
           name: code-coverage-report
-          path: build/build_coverage/html
+          path: html
       - name: Coveralls
         uses: coverallsapp/github-action@master
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
-          path-to-lcov: build/build_coverage/json.info
+          path-to-lcov: json.info

From 9ced2bf5c3cf0d8f74b26f1cfda42a525f53e838 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 31 Jan 2021 15:31:15 +0100
Subject: [PATCH 091/113] :construction_worker: add CI step for coverage

---
 .clang-tidy                                     | 2 +-
 .github/workflows/ubuntu.yml                    | 6 +++++-
 include/nlohmann/detail/input/binary_reader.hpp | 4 ++--
 include/nlohmann/json.hpp                       | 4 ++--
 single_include/nlohmann/json.hpp                | 8 ++++----
 5 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/.clang-tidy b/.clang-tidy
index 610b228399..5475b20244 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -41,4 +41,4 @@ CheckOptions:
 WarningsAsErrors: '*'
 
 #HeaderFilterRegex: '.*nlohmann.*'
-HeaderFilterRegex: '.*\.hpp$'
+HeaderFilterRegex: '.*/include/.*'
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 438cf4200f..3e86b42190 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -120,13 +120,17 @@ jobs:
 
   ci_test_coverage:
     runs-on: ubuntu-latest
-    container: nlohmann/json-ci:latest
+    container:
+      image: nlohmann/json-ci:latest
+      options: -v /__w/json/json:/workdir
     steps:
       - uses: actions/checkout@v2
       - name: cmake
         run: cmake -S . -B build -DJSON_CI=On
       - name: build
         run: cmake --build build --target ci_test_coverage
+      - name: copy
+        run: cp -r html /workdir ; cp -r json.info /workdir
       - name: archive coverage report
         uses: actions/upload-artifact@v2
         with:
diff --git a/include/nlohmann/detail/input/binary_reader.hpp b/include/nlohmann/detail/input/binary_reader.hpp
index 09b611485f..1d994ed6d6 100644
--- a/include/nlohmann/detail/input/binary_reader.hpp
+++ b/include/nlohmann/detail/input/binary_reader.hpp
@@ -78,9 +78,9 @@ class binary_reader
 
     // make class move-only
     binary_reader(const binary_reader&) = delete;
-    binary_reader(binary_reader&&) noexcept = default;
+    binary_reader(binary_reader&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
     binary_reader& operator=(const binary_reader&) = delete;
-    binary_reader& operator=(binary_reader&&) noexcept = default;
+    binary_reader& operator=(binary_reader&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
     ~binary_reader() = default;
 
     /*!
diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp
index 23169faefe..06c66a695a 100644
--- a/include/nlohmann/json.hpp
+++ b/include/nlohmann/json.hpp
@@ -1444,7 +1444,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
               typename U = detail::uncvref_t<CompatibleType>,
               detail::enable_if_t <
                   !detail::is_basic_json<U>::value && detail::is_compatible_type<basic_json_t, U>::value, int > = 0 >
-    basic_json(CompatibleType && val) noexcept(noexcept( // NOLINT(bugprone-forwarding-reference-overload)
+    basic_json(CompatibleType && val) noexcept(noexcept( // NOLINT(bugprone-forwarding-reference-overload,bugprone-exception-escape)
                 JSONSerializer<U>::to_json(std::declval<basic_json_t&>(),
                                            std::forward<CompatibleType>(val))))
     {
@@ -8752,7 +8752,7 @@ struct less<::nlohmann::detail::value_t>
 @since version 1.0.0
 */
 template<>
-inline void swap(nlohmann::json& j1, nlohmann::json& j2) noexcept(
+inline void swap(nlohmann::json& j1, nlohmann::json& j2) noexcept( // NOLINT(readability-inconsistent-declaration-parameter-name)
     is_nothrow_move_constructible<nlohmann::json>::value&&  // NOLINT(misc-redundant-expression)
     is_nothrow_move_assignable<nlohmann::json>::value
 )
diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp
index e59d8d6039..e66f9e72b6 100644
--- a/single_include/nlohmann/json.hpp
+++ b/single_include/nlohmann/json.hpp
@@ -7834,9 +7834,9 @@ class binary_reader
 
     // make class move-only
     binary_reader(const binary_reader&) = delete;
-    binary_reader(binary_reader&&) noexcept = default;
+    binary_reader(binary_reader&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
     binary_reader& operator=(const binary_reader&) = delete;
-    binary_reader& operator=(binary_reader&&) noexcept = default;
+    binary_reader& operator=(binary_reader&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
     ~binary_reader() = default;
 
     /*!
@@ -18092,7 +18092,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
               typename U = detail::uncvref_t<CompatibleType>,
               detail::enable_if_t <
                   !detail::is_basic_json<U>::value && detail::is_compatible_type<basic_json_t, U>::value, int > = 0 >
-    basic_json(CompatibleType && val) noexcept(noexcept( // NOLINT(bugprone-forwarding-reference-overload)
+    basic_json(CompatibleType && val) noexcept(noexcept( // NOLINT(bugprone-forwarding-reference-overload,bugprone-exception-escape)
                 JSONSerializer<U>::to_json(std::declval<basic_json_t&>(),
                                            std::forward<CompatibleType>(val))))
     {
@@ -25400,7 +25400,7 @@ struct less<::nlohmann::detail::value_t>
 @since version 1.0.0
 */
 template<>
-inline void swap(nlohmann::json& j1, nlohmann::json& j2) noexcept(
+inline void swap(nlohmann::json& j1, nlohmann::json& j2) noexcept( // NOLINT(readability-inconsistent-declaration-parameter-name)
     is_nothrow_move_constructible<nlohmann::json>::value&&  // NOLINT(misc-redundant-expression)
     is_nothrow_move_assignable<nlohmann::json>::value
 )
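The `noexcept(noexcept(...))` constructs that keep collecting NOLINTs above are conditional exception specifications: the inner expression is never evaluated, only inspected, so the function is noexcept exactly when the wrapped operation is. A compact sketch; `reset_via_move` and `nothrow_type` are illustrative names:

```cpp
#include <utility>

// The exception specification mirrors the body: if T's move assignment
// can throw, the wrapper is potentially-throwing; if not, it is noexcept.
template<typename T>
void reset_via_move(T& target, T& source)
    noexcept(noexcept(target = std::move(source)))
{
    target = std::move(source);
}

struct nothrow_type
{
    nothrow_type() = default;
    nothrow_type(nothrow_type&&) noexcept = default;
    nothrow_type& operator=(nothrow_type&&) noexcept = default;
};

static_assert(noexcept(reset_via_move(std::declval<nothrow_type&>(),
                                      std::declval<nothrow_type&>())),
              "noexcept propagates from the member operation");

int main() {}
```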
From 21a193c69ba3082927d6570514d750803fd30fe5 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 31 Jan 2021 15:50:25 +0100
Subject: [PATCH 092/113] :construction_worker: add CI step for coverage

---
 .github/workflows/ubuntu.yml          |  2 +-
 test/src/unit-constructor1.cpp        | 14 +++++++-------
 test/src/unit-regression1.cpp         |  2 +-
 test/src/unit-udt.cpp                 |  2 +-
 test/thirdparty/fifo_map/fifo_map.hpp |  8 ++++----
 5 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 3e86b42190..20889fec9b 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -130,7 +130,7 @@ jobs:
       - name: build
         run: cmake --build build --target ci_test_coverage
       - name: copy
-        run: cp -r html /workdir ; cp -r json.info /workdir
+        run: cp -r build/build_coverage/html /workdir ; cp -r build/build_coverage/json.info /workdir
diff --git a/test/src/unit-constructor1.cpp b/test/src/unit-constructor1.cpp
index ba9bf12b73..0ba21e25a3 100644
--- a/test/src/unit-constructor1.cpp
+++ b/test/src/unit-constructor1.cpp
@@ -1119,7 +1119,7 @@ TEST_CASE("constructors")
         {
             // This should break through any short string optimization in std::string
             std::string source(1024, '!');
-            const char* source_addr = source.data();
+            const char* source_addr = source.data(); // NOLINT(clang-analyzer-cplusplus.InnerPointer)
             json j = {std::move(source)};
             CHECK(j[0].get_ref<json::string_t&>().data() == source_addr);
         }
@@ -1128,7 +1128,7 @@ TEST_CASE("constructors")
         {
             // This should break through any short string optimization in std::string
             std::string source(1024, '!');
-            const char* source_addr = source.data();
+            const char* source_addr = source.data(); // NOLINT(clang-analyzer-cplusplus.InnerPointer)
             json j = {{"key", std::move(source)}};
             CHECK(j["key"].get_ref<json::string_t&>().data() == source_addr);
         }
@@ -1137,7 +1137,7 @@ TEST_CASE("constructors")
         {
             // This should break through any short string optimization in std::string
             std::string source(1024, '!');
-            const char* source_addr = source.data();
+            const char* source_addr = source.data(); // NOLINT(clang-analyzer-cplusplus.InnerPointer)
             json j = {{std::move(source), 42}};
             CHECK(j.get_ref<json::object_t&>().begin()->first.data() == source_addr);
         }
@@ -1148,7 +1148,7 @@ TEST_CASE("constructors")
         SECTION("constructor with implicit types (array)")
         {
             json::array_t source = {1, 2, 3};
-            const json* source_addr = source.data();
+            const json* source_addr = source.data(); // NOLINT(clang-analyzer-cplusplus.InnerPointer)
             json j {std::move(source)};
             CHECK(j[0].get_ref<json::array_t&>().data() == source_addr);
         }
@@ -1156,7 +1156,7 @@ TEST_CASE("constructors")
         SECTION("constructor with implicit types (object)")
        {
             json::array_t source = {1, 2, 3};
-            const json* source_addr = source.data();
+            const json* source_addr = source.data(); // NOLINT(clang-analyzer-cplusplus.InnerPointer)
             json j {{"key", std::move(source)}};
             CHECK(j["key"].get_ref<json::array_t&>().data() == source_addr);
         }
@@ -1164,7 +1164,7 @@ TEST_CASE("constructors")
         SECTION("assignment with implicit types (array)")
         {
             json::array_t source = {1, 2, 3};
-            const json* source_addr = source.data();
+            const json* source_addr = source.data(); // NOLINT(clang-analyzer-cplusplus.InnerPointer)
             json j = {std::move(source)};
             CHECK(j[0].get_ref<json::array_t&>().data() == source_addr);
         }
@@ -1172,7 +1172,7 @@ TEST_CASE("constructors")
         SECTION("assignment with implicit types (object)")
         {
             json::array_t source = {1, 2, 3};
-            const json* source_addr = source.data();
+            const json* source_addr = source.data(); // NOLINT(clang-analyzer-cplusplus.InnerPointer)
             json j = {{"key", std::move(source)}};
             CHECK(j["key"].get_ref<json::array_t&>().data() == source_addr);
         }
diff --git a/test/src/unit-regression1.cpp b/test/src/unit-regression1.cpp
index 449d9fd91b..45dc7a3336 100644
--- a/test/src/unit-regression1.cpp
+++ b/test/src/unit-regression1.cpp
@@ -94,7 +94,7 @@ template <typename T>
 struct foo_serializer < T, typename std::enable_if < !std::is_same<T, foo>::value >::type >
 {
     template <typename BasicJsonType>
-    static void to_json(BasicJsonType& j, const T& value) noexcept
+    static void to_json(BasicJsonType& j, const T& value) noexcept // NOLINT(bugprone-exception-escape)
     {
         ::nlohmann::to_json(j, value);
     }
diff --git a/test/src/unit-udt.cpp b/test/src/unit-udt.cpp
index 0983b371f9..2bebd8f599 100644
--- a/test/src/unit-udt.cpp
+++ b/test/src/unit-udt.cpp
@@ -407,7 +407,7 @@ TEST_CASE("adl_serializer specialization" * doctest::test_suite("udt"))
         json j = optPerson;
         CHECK(j.is_null());
 
-        optPerson.reset(new udt::person{{42}, {"John Doe"}, udt::country::russia}); // NOLINT(cppcoreguidelines-owning-memory)
+        optPerson.reset(new udt::person{{42}, {"John Doe"}, udt::country::russia}); // NOLINT(cppcoreguidelines-owning-memory,modernize-make-shared)
         j = optPerson;
         CHECK_FALSE(j.is_null());
diff --git a/test/thirdparty/fifo_map/fifo_map.hpp b/test/thirdparty/fifo_map/fifo_map.hpp
index c281e3be3a..cfa38c97b5 100644
--- a/test/thirdparty/fifo_map/fifo_map.hpp
+++ b/test/thirdparty/fifo_map/fifo_map.hpp
@@ -99,7 +99,7 @@ template <
     class T,
     class Compare = fifo_map_compare<Key>,
     class Allocator = std::allocator<std::pair<const Key, T>>
-    > class fifo_map
+    > class fifo_map // NOLINT(cppcoreguidelines-special-member-functions,hicpp-special-member-functions,-warnings-as-errors)
 {
   public:
     using key_type = Key;
@@ -514,10 +514,10 @@ template <
     internal_map_type m_map;
 };
 
-}
+} // namespace nlohmann
 
 // specialization of std::swap
-namespace std
+namespace std // NOLINT(cert-dcl58-cpp,-warnings-as-errors)
 {
 template <class Key, class T, class Compare, class Allocator>
 inline void swap(nlohmann::fifo_map<Key, T, Compare, Allocator>& m1,
@@ -525,6 +525,6 @@ inline void swap(nlohmann::fifo_map<Key, T, Compare, Allocator>& m1,
 {
     m1.swap(m2);
 }
-}
+} // namespace std
 
 #endif
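Background on the clang-analyzer-cplusplus.InnerPointer suppressions: the analyzer treats any pointer obtained from `data()` as potentially dangling once the string is touched again. These tests deliberately move the long string into the container and compare buffer addresses to prove the move was not a copy. A minimal sketch, assuming the common libstdc++/libc++ behavior that a long string's buffer travels with the move (not guaranteed by the standard, but what the tests rely on):

```cpp
#include <string>
#include <utility>

int main()
{
    std::string source(1024, '!'); // long enough to defeat SSO
    const char* source_addr = source.data(); // NOLINT(clang-analyzer-cplusplus.InnerPointer)

    std::string target = std::move(source); // buffer ownership moves in practice

    // Same buffer address means no copy happened.
    return target.data() == source_addr ? 0 : 1;
}
```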
'.*nlohmann.*'
-HeaderFilterRegex: '.*/include/.*'
+HeaderFilterRegex: '.*hpp$'
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 20889fec9b..c9beaad151 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -130,7 +130,7 @@ jobs:
       - name: build
         run: cmake --build build --target ci_test_coverage
       - name: copy
-        run: cp -r build/build_coverage/html /workdir ; cp -r build/build_coverage/json.info /workdir
+        run: pwd ; ls -la ; cp -r /__w/json/json/build/build_coverage/html /workdir ; cp -r /__w/json/json/build/build_coverage/json.info /workdir

From 829f1196328fac61d44aa0532ffc40130cd4b4a2 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 31 Jan 2021 17:06:06 +0100
Subject: [PATCH 094/113] :construction_worker: add CI step for coverage

---
 .github/workflows/ubuntu.yml | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index c9beaad151..56ef95ebeb 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -128,9 +128,14 @@ jobs:
       - name: cmake
         run: cmake -S . -B build -DJSON_CI=On
       - name: build
-        run: cmake --build build --target ci_test_coverage
-      - name: copy
-        run: pwd ; ls -la ; cp -r /__w/json/json/build/build_coverage/html /workdir ; cp -r /__w/json/json/build/build_coverage/json.info /workdir
+        run: |
+          cmake --build build --target ci_test_coverage
+          pwd
+          ls -la
+          ls -la build
+          ls -la build/build_coverage
+          cp -r /__w/json/json/build/build_coverage/html /workdir
+          cp -r /__w/json/json/build/build_coverage/json.info /workdir
       - name: archive coverage report
         uses: actions/upload-artifact@v2
         with:

From 91475eeff1d9e82549e474a590d26e8417e5e0ab Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 31 Jan 2021 17:57:27 +0100
Subject: [PATCH 095/113] :construction_worker: add CI step for coverage

---
 .github/workflows/ubuntu.yml   |  8 ++-----
 test/src/unit-constructor1.cpp | 42 ++++++++++++++++++++++------------
 2 files changed, 30 insertions(+), 20 deletions(-)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 56ef95ebeb..9a9306349d 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -128,12 +128,8 @@ jobs:
       - name: build
         run: |
           cmake --build build --target ci_test_coverage
-          pwd
-          ls -la
-          ls -la build
-          ls -la build/build_coverage
-          cp -r /__w/json/json/build/build_coverage/html /workdir
-          cp -r /__w/json/json/build/build_coverage/json.info /workdir
+          cp -r /__w/json/json/build/html /workdir
+          cp -r /__w/json/json/build/json.info /workdir
       - name: archive coverage report
         uses: actions/upload-artifact@v2
         with:
diff --git a/test/src/unit-constructor1.cpp b/test/src/unit-constructor1.cpp
index 0ba21e25a3..884a1e6f1d 100644
--- a/test/src/unit-constructor1.cpp
+++ b/test/src/unit-constructor1.cpp
@@ -1119,27 +1119,33 @@ TEST_CASE("constructors")
         {
             // This should break through any short string optimization in std::string
             std::string source(1024, '!');
-            const char* source_addr = source.data(); // NOLINT(clang-analyzer-cplusplus.InnerPointer)
+            const auto* source_addr = source.data();
             json j = {std::move(source)};
-            CHECK(j[0].get_ref<json::string_t&>().data() == source_addr);
+            const auto* target_addr = j[0].get_ref<json::string_t&>().data();
+            const bool success = (target_addr == source_addr);
+            CHECK(success);
         }
 
         SECTION("constructor with implicit types (object)")
         {
             // This should break through any short string optimization in std::string
             std::string source(1024, '!');
-            const char* source_addr = source.data(); // NOLINT(clang-analyzer-cplusplus.InnerPointer)
+            const auto* source_addr = source.data();
             json j = {{"key", std::move(source)}};
-            CHECK(j["key"].get_ref<json::string_t&>().data() == source_addr);
+            const auto* target_addr = j["key"].get_ref<json::string_t&>().data();
+            const bool success = (target_addr == source_addr);
+            CHECK(success);
         }
 
         SECTION("constructor with implicit types (object key)")
         {
             // This should break through any short string optimization in std::string
            std::string source(1024, '!');
-            const char* source_addr = source.data(); // NOLINT(clang-analyzer-cplusplus.InnerPointer)
+            const auto* source_addr = source.data();
             json j = {{std::move(source), 42}};
-            CHECK(j.get_ref<json::object_t&>().begin()->first.data() == source_addr);
+            const auto* target_addr = j.get_ref<json::object_t&>().begin()->first.data();
+            const bool success = (target_addr == source_addr);
+            CHECK(success);
         }
     }
 
@@ -1148,33 +1154,41 @@ TEST_CASE("constructors")
         SECTION("constructor with implicit types (array)")
         {
             json::array_t source = {1, 2, 3};
-            const json* source_addr = source.data(); // NOLINT(clang-analyzer-cplusplus.InnerPointer)
+            const auto* source_addr = source.data();
             json j {std::move(source)};
-            CHECK(j[0].get_ref<json::array_t&>().data() == source_addr);
+            const auto* target_addr = j[0].get_ref<json::array_t&>().data();
+            const bool success = (target_addr == source_addr);
+            CHECK(success);
         }
 
         SECTION("constructor with implicit types (object)")
         {
             json::array_t source = {1, 2, 3};
-            const json* source_addr = source.data(); // NOLINT(clang-analyzer-cplusplus.InnerPointer)
+            const auto* source_addr = source.data();
             json j {{"key", std::move(source)}};
-            CHECK(j["key"].get_ref<json::array_t&>().data() == source_addr);
+            const auto* target_addr = j["key"].get_ref<json::array_t&>().data();
+            const bool success = (target_addr == source_addr);
+            CHECK(success);
         }
 
         SECTION("assignment with implicit types (array)")
         {
             json::array_t source = {1, 2, 3};
-            const json* source_addr = source.data(); // NOLINT(clang-analyzer-cplusplus.InnerPointer)
+            const auto* source_addr = source.data();
             json j = {std::move(source)};
-            CHECK(j[0].get_ref<json::array_t&>().data() == source_addr);
+            const auto* target_addr = j[0].get_ref<json::array_t&>().data();
+            const bool success = (target_addr == source_addr);
+            CHECK(success);
         }
 
         SECTION("assignment with implicit types (object)")
         {
             json::array_t source = {1, 2, 3};
-            const json* source_addr = source.data(); // NOLINT(clang-analyzer-cplusplus.InnerPointer)
+            const auto* source_addr = source.data();
             json j = {{"key", std::move(source)}};
-            CHECK(j["key"].get_ref<json::array_t&>().data() == source_addr);
+            const auto* target_addr = j["key"].get_ref<json::array_t&>().data();
+            const bool success = (target_addr == source_addr);
+            CHECK(success);
         }
     }
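Note how this patch removes the suppressions from the previous one by restructuring the assertions instead: pulling each `data()` result into a named local and reducing the comparison to one `bool` keeps the analyzer from having to track an inner pointer through the expansion of the assertion macro. A standalone sketch of the rewritten shape (plain `main` standing in for a doctest case):

```cpp
#include <string>
#include <utility>

int main()
{
    std::string source(1024, '!');
    const auto* source_addr = source.data();

    std::string target = std::move(source);

    // Decomposed form: no inner pointer survives into the assertion.
    const auto* target_addr = target.data();
    const bool success = (target_addr == source_addr);
    return success ? 0 : 1; // stands in for CHECK(success)
}
```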
From d6f54a0837d94a6e010892379010fd51c2799a00 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 31 Jan 2021 18:37:47 +0100
Subject: [PATCH 096/113] :construction_worker: add CI step for coverage

---
 .github/workflows/ubuntu.yml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 9a9306349d..38ff8a536f 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -130,15 +130,15 @@ jobs:
       - name: build
         run: |
           cmake --build build --target ci_test_coverage
-          cp -r /__w/json/json/build/html /workdir
-          cp -r /__w/json/json/build/json.info /workdir
+          cp -vr /__w/json/json/build/html /workdir
+          cp -vr /__w/json/json/build/json.info /workdir
       - name: archive coverage report
         uses: actions/upload-artifact@v2
         with:
           name: code-coverage-report
-          path: html
+          path: /__w/json/json/html
       - name: Coveralls
         uses: coverallsapp/github-action@master
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
-          path-to-lcov: json.info
+          path-to-lcov: /__w/json/json/json.info

From 557353318130e870139e72a1b9e3262417701e50 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 31 Jan 2021 19:38:03 +0100
Subject: [PATCH 097/113] :construction_worker: add CI step for coverage

---
 .github/workflows/ubuntu.yml | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 38ff8a536f..28225bc51f 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -120,25 +120,20 @@ jobs:
 
   ci_test_coverage:
     runs-on: ubuntu-latest
-    container:
-      image: nlohmann/json-ci:latest
-      options: -v /__w/json/json:/workdir
+    container: nlohmann/json-ci:latest
     steps:
       - uses: actions/checkout@v2
       - name: cmake
         run: cmake -S . -B build -DJSON_CI=On
       - name: build
-        run: |
-          cmake --build build --target ci_test_coverage
-          cp -vr /__w/json/json/build/html /workdir
-          cp -vr /__w/json/json/build/json.info /workdir
+        run: cmake --build build --target ci_test_coverage
      - name: archive coverage report
        uses: actions/upload-artifact@v2
        with:
          name: code-coverage-report
-          path: /__w/json/json/html
+          path: /__w/json/json/build/html
      - name: Coveralls
        uses: coverallsapp/github-action@master
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
-          path-to-lcov: /__w/json/json/json.info
+          path-to-lcov: /__w/json/json/build/json.info

From 799472a308fa0b45278265add4cffcfc2fe6ef22 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Sun, 31 Jan 2021 22:08:25 +0100
Subject: [PATCH 098/113] :construction_worker: add CI step for coverage

---
 .github/workflows/ubuntu.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 28225bc51f..f3fcab4bd0 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -136,4 +136,4 @@ jobs:
         uses: coverallsapp/github-action@master
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
-          path-to-lcov: /__w/json/json/build/json.info
+          path-to-lcov: /__w/json/json/build/json.info.filtered.noexcept

From c9e66df00c20be2177150883b548d9edc4cf9729 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Mon, 8 Feb 2021 21:00:37 +0100
Subject: [PATCH 099/113] :construction_worker: add CI steps for disabled
 exceptions and implicit conversions

---
 .circleci/config.yml         | 56 -----------------------------
 .github/workflows/ubuntu.yml | 20 +++++++++++
 .travis.yml                  | 68 +-----------------------------------
 README.md                    |  4 +--
 cmake/ci.cmake               | 28 +++++++++++++++
 test/src/UBSAN.supp          |  1 -
 6 files changed, 50 insertions(+), 127 deletions(-)
 delete mode 100644 .circleci/config.yml
 delete mode 100644 test/src/UBSAN.supp

diff --git a/.circleci/config.yml b/.circleci/config.yml
deleted file mode 100644
index 82e5098408..0000000000
--- a/.circleci/config.yml
+++ /dev/null
@@ -1,56 +0,0 @@
-version: 2
-jobs:
-  build_stable:
-    docker:
-      - image: debian:stretch
-
-    steps:
-      - checkout
-
-      - run:
-          name: Install required tools
-          command: 'apt-get update && apt-get install -y gcc g++ git cmake'
-      - run:
-          name: Run CMake
-          command: 'mkdir build ; cd build ; cmake .. -DJSON_BuildTests=On'
-      - run:
-          name: Compile
-          command: 'cmake --build build'
-      - run:
-          name: Execute test suite
-          command: 'cd build ; ctest --output-on-failure -j 2'
-
-  build_bleeding_edge:
-    docker:
-      - image: archlinux
-
-    steps:
-      - checkout
-
-      - run:
-          name: Install required tools
-          command: 'pacman -Sy --noconfirm base base-devel gcc git cmake'
-      - run:
-          name: Run CMake
-          command: 'mkdir build ; cd build ; cmake .. -DJSON_BuildTests=On'
-      - run:
-          name: Compile
-          command: 'cmake --build build'
-      - run:
-          name: Execute test suite
-          command: 'cd build ; ctest --output-on-failure -j 2'
-
-workflows:
-  version: 2
-  build_and_test_all:
-    jobs:
-      - build_stable:
-          filters:
-            branches:
-              ignore:
-                gh-pages
-      - build_bleeding_edge:
-          filters:
-            branches:
-              ignore:
-                gh-pages
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index f3fcab4bd0..10ce098a7b 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -108,6 +108,26 @@ jobs:
       - name: build
         run: cmake --build build --target ci_clang_tidy
 
+  ci_test_noexceptions:
+    runs-on: ubuntu-latest
+    container: nlohmann/json-ci:latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: cmake
+        run: cmake -S . -B build -DJSON_CI=On
+      - name: build
+        run: cmake --build build --target ci_test_noexceptions
+
+  ci_test_noimplicitconversions:
+    runs-on: ubuntu-latest
+    container: nlohmann/json-ci:latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: cmake
+        run: cmake -S . -B build -DJSON_CI=On
+      - name: build
+        run: cmake --build build --target ci_test_noimplicitconversions
+
   ci_test_clang_sanitizer:
     runs-on: ubuntu-latest
     container: nlohmann/json-ci:latest
diff --git a/.travis.yml b/.travis.yml
index f48ee1fd6d..eaf45169b0 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -17,55 +17,6 @@ group: edge
 matrix:
   include:
 
-    # Valgrind
-    - os: linux
-      compiler: gcc
-      env:
-        - COMPILER=g++-4.9
-        - CMAKE_OPTIONS=-DJSON_Valgrind=ON
-      addons:
-        apt:
-          sources: ['ubuntu-toolchain-r-test']
-          packages: ['g++-4.9', 'valgrind', 'ninja-build']
-
-    # clang sanitizer
-    - os: linux
-      compiler: clang
-      env:
-        - COMPILER=clang++-7
-        - CMAKE_OPTIONS=-DJSON_Sanitizer=ON
-        - UBSAN_OPTIONS=print_stacktrace=1,suppressions=$(pwd)/test/src/UBSAN.supp
-      addons:
-        apt:
-          sources: ['ubuntu-toolchain-r-test', 'llvm-toolchain-trusty-7']
-          packages: ['g++-6', 'clang-7', 'ninja-build']
-      before_script:
-        - export PATH=$PATH:/usr/lib/llvm-7/bin
-
-    # cppcheck
-    - os: linux
-      compiler: gcc
-      env:
-        - COMPILER=g++-4.9
-        - SPECIAL=cppcheck
-      addons:
-        apt:
-          sources: ['ubuntu-toolchain-r-test']
-          packages: ['g++-4.9', 'cppcheck', 'ninja-build']
-      after_success:
-        - make cppcheck
-
-    # no exceptions
-    - os: linux
-      compiler: gcc
-      env:
-        - COMPILER=g++-4.9
-        - CMAKE_OPTIONS=-DJSON_NoExceptions=ON
-      addons:
-        apt:
-          sources: ['ubuntu-toolchain-r-test']
-          packages: ['g++-4.9', 'ninja-build']
-
     # check amalgamation
     - os: linux
       compiler: gcc
@@ -133,11 +84,6 @@ matrix:
     - os: osx
       osx_image: xcode12
 
-    - os: osx
-      osx_image: xcode12
-      env:
-        - IMPLICIT_CONVERSIONS=OFF
-
     # Linux / GCC
 
     - os: linux
@@ -196,16 +142,6 @@ matrix:
           sources: ['ubuntu-toolchain-r-test']
          packages: ['g++-9', 'ninja-build']
 
-    - os: linux
-      compiler: gcc
-      env:
-        - COMPILER=g++-9
-        - IMPLICIT_CONVERSIONS=OFF
-      addons:
-        apt:
-          sources: ['ubuntu-toolchain-r-test']
-          packages: ['g++-9', 'ninja-build']
-
     - os: linux
       compiler: gcc
      env:
@@ -318,15 +254,13 @@ script:
   - if [[ "${COMPILER}" != "" ]]; then export CXX=${COMPILER}; fi
 
   # by default, use the single-header version
   - if [[ "${MULTIPLE_HEADERS}" == "" ]]; then export MULTIPLE_HEADERS=OFF; fi
 
-  # by default, use implicit conversions
-  - if [[ "${IMPLICIT_CONVERSIONS}" == "" ]]; then export IMPLICIT_CONVERSIONS=ON; fi
-
   # append CXX_STANDARD to CMAKE_OPTIONS if required
   - CMAKE_OPTIONS+=${CXX_STANDARD:+ -DCMAKE_CXX_STANDARD=$CXX_STANDARD -DCMAKE_CXX_STANDARD_REQUIRED=ON}
 
   # compile and execute unit tests
   - mkdir -p build && cd build
-  - cmake .. ${CMAKE_OPTIONS} -DJSON_MultipleHeaders=${MULTIPLE_HEADERS} -DJSON_ImplicitConversions=${IMPLICIT_CONVERSIONS} -DJSON_BuildTests=On -GNinja && cmake --build . --config Release
+  - cmake .. ${CMAKE_OPTIONS} -DJSON_MultipleHeaders=${MULTIPLE_HEADERS} -DJSON_BuildTests=On -GNinja && cmake --build . --config Release
   - ctest -C Release --timeout 2700 -V -j
   - cd ..
diff --git a/README.md b/README.md
index 391d07ede2..71bda436ce 100644
--- a/README.md
+++ b/README.md
@@ -1228,7 +1228,7 @@ Please note:
 
 - Unsupported versions of GCC and Clang are rejected by `#error` directives. This can be switched off by defining `JSON_SKIP_UNSUPPORTED_COMPILER_CHECK`. Note that you can expect no support in this case.
 
-The following compilers are currently used in continuous integration at [Travis](https://travis-ci.org/nlohmann/json), [AppVeyor](https://ci.appveyor.com/project/nlohmann/json), [GitHub Actions](https://github.com/nlohmann/json/actions), and [CircleCI](https://circleci.com/gh/nlohmann/json):
+The following compilers are currently used in continuous integration at [Travis](https://travis-ci.org/nlohmann/json), [AppVeyor](https://ci.appveyor.com/project/nlohmann/json), and [GitHub Actions](https://github.com/nlohmann/json/actions):
 
 | Compiler                                                           | Operating System   | CI Provider    |
 |--------------------------------------------------------------------|--------------------|----------------|
@@ -1251,14 +1251,12 @@ The following compilers are currently used in continuous integration at [Travis]
 | GCC 4.8.5 (Ubuntu 4.8.5-4ubuntu8\~14.04.2)                         | Ubuntu 14.04.5 LTS | Travis         |
 | GCC 4.9.4 (Ubuntu 4.9.4-2ubuntu1\~14.04.1)                         | Ubuntu 14.04.5 LTS | Travis         |
 | GCC 5.5.0 (Ubuntu 5.5.0-12ubuntu1\~14.04)                          | Ubuntu 14.04.5 LTS | Travis         |
-| GCC 6.3.0 (Debian 6.3.0-18+deb9u1)                                 | Debian 9           | Circle CI      |
 | GCC 6.5.0 (Ubuntu 6.5.0-2ubuntu1\~14.04.1)                         | Ubuntu 14.04.5 LTS | Travis         |
 | GCC 7.3.0 (x86_64-posix-seh-rev0, Built by MinGW-W64 project)      | Windows-6.3.9600   | AppVeyor       |
 | GCC 7.5.0 (Ubuntu 7.5.0-3ubuntu1\~14.04.1)                         | Ubuntu 14.04.5 LTS | Travis         |
 | GCC 7.5.0 (Ubuntu 7.5.0-3ubuntu1\~18.04)                           | Ubuntu 18.04.4 LTS | GitHub Actions |
 | GCC 8.4.0 (Ubuntu 8.4.0-1ubuntu1\~14.04)                           | Ubuntu 14.04.5 LTS | Travis         |
 | GCC 9.3.0 (Ubuntu 9.3.0-11ubuntu0\~14.04)                          | Ubuntu 14.04.5 LTS | Travis         |
-| GCC 10.1.0 (Arch Linux latest)                                     | Arch Linux         | Circle CI      |
 | MSVC 19.0.24241.7 (Build Engine version 14.0.25420.1)              | Windows-6.3.9600   | AppVeyor       |
 | MSVC 19.16.27035.0 (15.9.21+g9802d43bc3 for .NET Framework)        | Windows-10.0.14393 | AppVeyor       |
 | MSVC 19.25.28614.0 (Build Engine version 16.5.0+d4cbfca49 for .NET Framework) | Windows-10.0.17763 | AppVeyor |
diff --git a/cmake/ci.cmake b/cmake/ci.cmake
index dfad0c22a2..84296fd455 100644
--- a/cmake/ci.cmake
+++ b/cmake/ci.cmake
@@ -415,6 +415,34 @@ foreach(CXX_STANDARD 11 14 17 20)
     )
 endforeach()
 
+###############################################################################
+# Disable exceptions.
+###############################################################################
+
+add_custom_target(ci_test_noexceptions
+    COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND}
+        -DCMAKE_BUILD_TYPE=Debug -GNinja
+        -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON -DCMAKE_CXX_FLAGS=-DJSON_NOEXCEPTION -DDOCTEST_TEST_FILTER=--no-throw
+        -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_noexceptions
+    COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_noexceptions
+    COMMAND cd ${PROJECT_BINARY_DIR}/build_noexceptions && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure
+    COMMENT "Compile and test with with exceptions switched off"
+)
+
+###############################################################################
+# Disable implicit conversions.
+###############################################################################
+
+add_custom_target(ci_test_noimplicitconversions
+    COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND}
+        -DCMAKE_BUILD_TYPE=Debug -GNinja
+        -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON -DJSON_ImplicitConversions=ON
+        -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_noimplicitconversions
+    COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_noimplicitconversions
+    COMMAND cd ${PROJECT_BINARY_DIR}/build_noimplicitconversions && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure
+    COMMENT "Compile and test with with implicit conversions switched off"
+)
+
 ###############################################################################
 # Coverage.
 ###############################################################################
diff --git a/test/src/UBSAN.supp b/test/src/UBSAN.supp
deleted file mode 100644
index b19f043699..0000000000
--- a/test/src/UBSAN.supp
+++ /dev/null
@@ -1 +0,0 @@
-unsigned-integer-overflow:stl_bvector.h
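A word on what the two new targets actually exercise. `JSON_NOEXCEPTION` swaps the library's throw/try/catch macros for abort-based fallbacks, and the CMake option `JSON_ImplicitConversions` maps to the `JSON_USE_IMPLICIT_CONVERSIONS` macro (its name here is taken from the library of this era; treat it as an assumption when porting). A sketch of what the implicit-conversion toggle changes for callers:

```cpp
#include <string>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    const json j = "hello";

#if JSON_USE_IMPLICIT_CONVERSIONS
    // Compiles only while implicit conversions are enabled (the default).
    std::string s1 = j;
    (void)s1;
#endif

    // Always available; the style the no-implicit-conversions build enforces.
    auto s2 = j.get<std::string>();
    (void)s2;
}
```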
From 29b27e3ee1a61515bf702752e2c9251500d9abbe Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Mon, 8 Feb 2021 21:04:02 +0100
Subject: [PATCH 100/113] :rotating_light: fix warnings

---
 include/nlohmann/thirdparty/hedley/hedley_undef.hpp | 2 ++
 single_include/nlohmann/json.hpp                    | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/include/nlohmann/thirdparty/hedley/hedley_undef.hpp b/include/nlohmann/thirdparty/hedley/hedley_undef.hpp
index e74f4dfbfe..d2b37a16d6 100644
--- a/include/nlohmann/thirdparty/hedley/hedley_undef.hpp
+++ b/include/nlohmann/thirdparty/hedley/hedley_undef.hpp
@@ -1,3 +1,5 @@
+#pragma once
+
 #undef JSON_HEDLEY_ALWAYS_INLINE
 #undef JSON_HEDLEY_ARM_VERSION
 #undef JSON_HEDLEY_ARM_VERSION_CHECK
diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp
index d416091744..20a58ba452 100644
--- a/single_include/nlohmann/json.hpp
+++ b/single_include/nlohmann/json.hpp
@@ -25543,6 +25543,8 @@ inline nlohmann::json::json_pointer operator "" _json_pointer(const char* s, std
 #undef JSON_EXPLICIT
 
 // #include <nlohmann/thirdparty/hedley/hedley_undef.hpp>
+
+
 #undef JSON_HEDLEY_ALWAYS_INLINE
 #undef JSON_HEDLEY_ARM_VERSION
 #undef JSON_HEDLEY_ARM_VERSION_CHECK

From d68b7f735f8e70f961ec9684cfe58eb5c2a8545a Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Tue, 9 Feb 2021 09:51:14 +0100
Subject: [PATCH 101/113] :construction_worker: add CI steps for checking
 indentation

---
 .github/workflows/ubuntu.yml | 10 ++++++++
 cmake/ci.cmake               | 33 +++++++++++++++++++++++++++++++
 2 files changed, 43 insertions(+)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 10ce098a7b..7f5fe71f77 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -108,6 +108,16 @@ jobs:
       - name: build
         run: cmake --build build --target ci_clang_tidy
 
+  ci_test_amalgamation:
+    runs-on: ubuntu-latest
+    container: nlohmann/json-ci:latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: cmake
+        run: cmake -S . -B build -DJSON_CI=On
+      - name: build
+        run: cmake --build build --target ci_test_amalgamation
+
   ci_test_noexceptions:
     runs-on: ubuntu-latest
     container: nlohmann/json-ci:latest
diff --git a/cmake/ci.cmake b/cmake/ci.cmake
index 84296fd455..47f1a3fa51 100644
--- a/cmake/ci.cmake
+++ b/cmake/ci.cmake
@@ -8,6 +8,11 @@ set(N 10)
 include(FindPython3)
 find_package(Python3 COMPONENTS Interpreter)
 
+find_program(ASTYLE_TOOL NAMES astyle)
+execute_process(COMMAND ${ASTYLE_TOOL} --version OUTPUT_VARIABLE ASTYLE_TOOL_VERSION ERROR_VARIABLE ASTYLE_TOOL_VERSION)
+string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" ASTYLE_TOOL_VERSION "${ASTYLE_TOOL_VERSION}")
+message(STATUS "πŸ”– Artistic Style ${ASTYLE_TOOL_VERSION} (${ASTYLE_TOOL})")
+
 find_program(CLANG_TOOL NAMES clang++-HEAD clang++-11 clang++)
 execute_process(COMMAND ${CLANG_TOOL} --version OUTPUT_VARIABLE CLANG_TOOL_VERSION ERROR_VARIABLE CLANG_TOOL_VERSION)
 string(REGEX MATCH "[0-9]+(\\.[0-9]+)+" CLANG_TOOL_VERSION "${CLANG_TOOL_VERSION}")
@@ -479,6 +484,34 @@ add_custom_target(ci_test_clang_sanitizer
     COMMENT "Compile and test with sanitizers"
 )
 
+###############################################################################
+# Check if header is amalgamated and sources are properly indented.
+###############################################################################
+
+set(ASTYLE_FLAGS --style=allman --indent=spaces=4 --indent-modifiers --indent-switches --indent-preproc-block --indent-preproc-define --indent-col1-comments --pad-oper --pad-header --align-pointer=type --align-reference=type --add-brackets --convert-tabs --close-templates --lineend=linux --preserve-date --formatted)
+
+file(GLOB_RECURSE SRC_FILES
+    ${PROJECT_SOURCE_DIR}/include/nlohmann/*.hpp
+    ${PROJECT_SOURCE_DIR}/test/src/*.cpp
+    ${PROJECT_SOURCE_DIR}/test/src/*.hpp
+    ${PROJECT_SOURCE_DIR}/benchmarks/src/benchmarks.cpp
+    ${PROJECT_SOURCE_DIR}/doc/examples/*.cpp
+)
+
+add_custom_target(ci_test_amalgamation
+    COMMAND rm -fr ${PROJECT_SOURCE_DIR}/single_include/nlohmann/json.hpp~
+    COMMAND cp ${PROJECT_SOURCE_DIR}/single_include/nlohmann/json.hpp ${PROJECT_SOURCE_DIR}/single_include/nlohmann/json.hpp~
+    COMMAND ${Python3_EXECUTABLE} ${PROJECT_SOURCE_DIR}/third_party/amalgamate/amalgamate.py -c ${PROJECT_SOURCE_DIR}/third_party/amalgamate/config.json -s .
+    COMMAND ${ASTYLE_TOOL} ${ASTYLE_FLAGS} --suffix=none --quiet ${PROJECT_SOURCE_DIR}/single_include/nlohmann/json.hpp
+    COMMAND diff ${PROJECT_SOURCE_DIR}/single_include/nlohmann/json.hpp~ ${PROJECT_SOURCE_DIR}/single_include/nlohmann/json.hpp
+
+    COMMAND ${ASTYLE_TOOL} ${ASTYLE_FLAGS} ${SRC_FILES}
+    COMMAND cd ${PROJECT_SOURCE_DIR} && for FILE in `find . -name '*.orig'`\; do false \; done
+
+    WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
+    COMMENT "Check amalagamation and indentation"
+)
+
 ###############################################################################
 # Valgrind.
 ###############################################################################

From efc1116beb2198cc70d6d15d26d87a2ce360063b Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Tue, 9 Feb 2021 09:56:25 +0100
Subject: [PATCH 102/113] :bug: fix variable use

---
 cmake/ci.cmake | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cmake/ci.cmake b/cmake/ci.cmake
index 47f1a3fa51..bd0261d585 100644
--- a/cmake/ci.cmake
+++ b/cmake/ci.cmake
@@ -490,7 +490,7 @@ add_custom_target(ci_test_clang_sanitizer
 
 set(ASTYLE_FLAGS --style=allman --indent=spaces=4 --indent-modifiers --indent-switches --indent-preproc-block --indent-preproc-define --indent-col1-comments --pad-oper --pad-header --align-pointer=type --align-reference=type --add-brackets --convert-tabs --close-templates --lineend=linux --preserve-date --formatted)
 
-file(GLOB_RECURSE SRC_FILES
+file(GLOB_RECURSE INDENT_FILES
     ${PROJECT_SOURCE_DIR}/include/nlohmann/*.hpp
     ${PROJECT_SOURCE_DIR}/test/src/*.cpp
     ${PROJECT_SOURCE_DIR}/test/src/*.hpp
     ${PROJECT_SOURCE_DIR}/benchmarks/src/benchmarks.cpp
     ${PROJECT_SOURCE_DIR}/doc/examples/*.cpp
@@ -505,7 +505,7 @@ add_custom_target(ci_test_amalgamation
     COMMAND ${ASTYLE_TOOL} ${ASTYLE_FLAGS} --suffix=none --quiet ${PROJECT_SOURCE_DIR}/single_include/nlohmann/json.hpp
     COMMAND diff ${PROJECT_SOURCE_DIR}/single_include/nlohmann/json.hpp~ ${PROJECT_SOURCE_DIR}/single_include/nlohmann/json.hpp
 
-    COMMAND ${ASTYLE_TOOL} ${ASTYLE_FLAGS} ${SRC_FILES}
+    COMMAND ${ASTYLE_TOOL} ${ASTYLE_FLAGS} ${INDENT_FILES}
     COMMAND cd ${PROJECT_SOURCE_DIR} && for FILE in `find . -name '*.orig'`\; do false \; done
 
     WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
From bea760cc7e213061525279ccb0655b1f2bae03a4 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Wed, 10 Feb 2021 08:30:09 +0100
Subject: [PATCH 103/113] :green_heart: fix build

---
 include/nlohmann/json.hpp        | 5 +++--
 single_include/nlohmann/json.hpp | 6 ++++--
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp
index eb6f240c93..2fc49dde30 100644
--- a/include/nlohmann/json.hpp
+++ b/include/nlohmann/json.hpp
@@ -64,6 +64,7 @@ SOFTWARE.
 #include
 #include
 #include
+#include <nlohmann/detail/string_escape.hpp>
 #include
 #include
 #include
@@ -8655,7 +8656,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
         for (auto it = source.cbegin(); it != source.cend(); ++it)
         {
             // escape the key name to be used in a JSON patch
-            const auto path_key = path + "/" + json_pointer::escape(it.key());
+            const auto path_key = path + "/" + detail::escape(it.key());
 
             if (target.find(it.key()) != target.end())
             {
@@ -8679,7 +8680,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
             if (source.find(it.key()) == source.end())
             {
                 // found a key that is not in this -> add it
-                const auto path_key = path + "/" + json_pointer::escape(it.key());
+                const auto path_key = path + "/" + detail::escape(it.key());
                 result.push_back(
                 {
                     {"op", "add"}, {"path", path_key},
diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp
index 3703c1b81d..032d1ebd16 100644
--- a/single_include/nlohmann/json.hpp
+++ b/single_include/nlohmann/json.hpp
@@ -12884,6 +12884,8 @@ class json_ref
 
 // #include
 
+// #include <nlohmann/detail/string_escape.hpp>
+
 // #include
 
 // #include
 
@@ -25466,7 +25468,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
         for (auto it = source.cbegin(); it != source.cend(); ++it)
         {
             // escape the key name to be used in a JSON patch
-            const auto path_key = path + "/" + json_pointer::escape(it.key());
+            const auto path_key = path + "/" + detail::escape(it.key());
 
             if (target.find(it.key()) != target.end())
             {
@@ -25490,7 +25492,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
             if (source.find(it.key()) == source.end())
             {
                 // found a key that is not in this -> add it
-                const auto path_key = path + "/" + json_pointer::escape(it.key());
+                const auto path_key = path + "/" + detail::escape(it.key());
                 result.push_back(
                 {
                     {"op", "add"}, {"path", path_key},
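The switch to `detail::escape` matters because `json::diff` builds RFC 6902 patch paths, and JSON Pointer (RFC 6901) reserves `/` and `~` in keys: they must become `~1` and `~0` respectively. A small usage sketch; the exact `dump()` output shown in the comment is indicative, not normative:

```cpp
#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    json source = {{"a/b", 1}};
    json target = {{"a/b", 2}};

    // The key "a/b" is escaped to "a~1b" inside the patch path.
    const json patch = json::diff(source, target);
    std::cout << patch.dump() << '\n';
    // expected along the lines of: [{"op":"replace","path":"/a~1b","value":2}]

    // Round trip: applying the patch reproduces the target.
    std::cout << std::boolalpha << (source.patch(patch) == target) << '\n';
}
```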
From 378622c5c67b997ce406e8f53f57ea6c441431cf Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Wed, 10 Feb 2021 08:30:49 +0100
Subject: [PATCH 104/113] :heavy_minus_sign: remove CircleCI

---
 README.md | 2 --
 1 file changed, 2 deletions(-)

diff --git a/README.md b/README.md
index 71bda436ce..d1341376c5 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,6 @@
 [![Ubuntu](https://github.com/nlohmann/json/workflows/Ubuntu/badge.svg)](https://github.com/nlohmann/json/actions?query=workflow%3AUbuntu)
 [![macOS](https://github.com/nlohmann/json/workflows/macOS/badge.svg)](https://github.com/nlohmann/json/actions?query=workflow%3AmacOS)
 [![Windows](https://github.com/nlohmann/json/workflows/Windows/badge.svg)](https://github.com/nlohmann/json/actions?query=workflow%3AWindows)
-[![Build Status](https://circleci.com/gh/nlohmann/json.svg?style=svg)](https://circleci.com/gh/nlohmann/json)
 [![Coverage Status](https://coveralls.io/repos/github/nlohmann/json/badge.svg?branch=develop)](https://coveralls.io/github/nlohmann/json?branch=develop)
 [![Coverity Scan Build Status](https://scan.coverity.com/projects/5550/badge.svg)](https://scan.coverity.com/projects/nlohmann-json)
 [![Codacy Badge](https://api.codacy.com/project/badge/Grade/f3732b3327e34358a0e9d1fe9f661f08)](https://www.codacy.com/app/nlohmann/json?utm_source=github.com&utm_medium=referral&utm_content=nlohmann/json&utm_campaign=Badge_Grade)
@@ -1539,7 +1538,6 @@ The library itself consists of a single header file licensed under the MIT licen
 - [**American fuzzy lop**](https://lcamtuf.coredump.cx/afl/) for fuzz testing
 - [**AppVeyor**](https://www.appveyor.com) for [continuous integration](https://ci.appveyor.com/project/nlohmann/json) on Windows
 - [**Artistic Style**](http://astyle.sourceforge.net) for automatic source code indentation
-- [**CircleCI**](https://circleci.com) for [continuous integration](https://circleci.com/gh/nlohmann/json).
 - [**Clang**](https://clang.llvm.org) for compilation with code sanitizers
 - [**CMake**](https://cmake.org) for build automation
 - [**Codacity**](https://www.codacy.com) for further [code analysis](https://www.codacy.com/app/nlohmann/json)

From 9882e4ac7d3cba88fd48c3b87d12034abe9947e7 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Wed, 10 Feb 2021 08:40:19 +0100
Subject: [PATCH 105/113] :construction_worker: add CI step for diagnostics

---
 .github/workflows/ubuntu.yml           | 10 ++++++++++
 cmake/ci.cmake                         | 20 +++++++++++++++++---
 include/nlohmann/detail/exceptions.hpp |  2 ++
 single_include/nlohmann/json.hpp       |  2 ++
 4 files changed, 31 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 7f5fe71f77..d09b640fe4 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -118,6 +118,16 @@ jobs:
       - name: build
         run: cmake --build build --target ci_test_amalgamation
 
+  ci_test_diagnostics:
+    runs-on: ubuntu-latest
+    container: nlohmann/json-ci:latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: cmake
+        run: cmake -S . -B build -DJSON_CI=On
+      - name: build
+        run: cmake --build build --target ci_test_diagnostics
+
   ci_test_noexceptions:
     runs-on: ubuntu-latest
     container: nlohmann/json-ci:latest
diff --git a/cmake/ci.cmake b/cmake/ci.cmake
index bd0261d585..5085c369dd 100644
--- a/cmake/ci.cmake
+++ b/cmake/ci.cmake
@@ -431,7 +431,7 @@ add_custom_target(ci_test_noexceptions
     -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_noexceptions
     COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_noexceptions
     COMMAND cd ${PROJECT_BINARY_DIR}/build_noexceptions && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure
-    COMMENT "Compile and test with with exceptions switched off"
+    COMMENT "Compile and test with exceptions switched off"
 )
 
 ###############################################################################
@@ -445,7 +445,21 @@ add_custom_target(ci_test_noimplicitconversions
     -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_noimplicitconversions
     COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_noimplicitconversions
     COMMAND cd ${PROJECT_BINARY_DIR}/build_noimplicitconversions && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure
-    COMMENT "Compile and test with with implicit conversions switched off"
+    COMMENT "Compile and test with implicit conversions switched off"
+)
+
+###############################################################################
+# Enable improved diagnostics.
+###############################################################################
+
+add_custom_target(ci_test_diagnostics
+    COMMAND CXX=${CLANG_TOOL} ${CMAKE_COMMAND}
+        -DCMAKE_BUILD_TYPE=Debug -GNinja
+        -DJSON_BuildTests=ON -DJSON_MultipleHeaders=ON -DJSON_Diagnostics=ON
+        -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_diagnostics
+    COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_diagnostics
+    COMMAND cd ${PROJECT_BINARY_DIR}/build_diagnostics && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure
+    COMMENT "Compile and test with improved diagnostics enabled"
 )
 
 ###############################################################################
@@ -755,7 +755,7 @@ else()
     )
 endif()
 
-set(JSON_CMAKE_FLAGS "JSON_BuildTests;JSON_Install;JSON_MultipleHeaders;JSON_Sanitizer;JSON_Valgrind;JSON_NoExceptions;JSON_Coverage")
+set(JSON_CMAKE_FLAGS "JSON_BuildTests;JSON_Install;JSON_MultipleHeaders;JSON_Sanitizer;JSON_Valgrind;JSON_NoExceptions;JSON_Coverage;JSON_Diagnostics")
 
 foreach(JSON_CMAKE_FLAG ${JSON_CMAKE_FLAGS})
     string(TOLOWER "ci_cmake_flag_${JSON_CMAKE_FLAG}" JSON_CMAKE_FLAG_TARGET)
diff --git a/include/nlohmann/detail/exceptions.hpp b/include/nlohmann/detail/exceptions.hpp
index 74fd048a35..8ad5d123ad 100644
--- a/include/nlohmann/detail/exceptions.hpp
+++ b/include/nlohmann/detail/exceptions.hpp
@@ -3,6 +3,7 @@
 #include <exception> // exception
 #include <stdexcept> // runtime_error
 #include <string> // to_string
+#include <vector> // vector
 
 #include
 #include
@@ -118,6 +119,7 @@ class exception : public std::exception
             return a + "/" + detail::escape(b);
         }) + ") ";
 #else
+        static_cast<void>(leaf_element);
         return "";
 #endif
     }
diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp
index 032d1ebd16..c43c5e7983 100644
--- a/single_include/nlohmann/json.hpp
+++ b/single_include/nlohmann/json.hpp
@@ -72,6 +72,7 @@ SOFTWARE.
 #include <exception> // exception
 #include <stdexcept> // runtime_error
 #include <string> // to_string
+#include <vector> // vector
 
 // #include
 
@@ -2705,6 +2706,7 @@ class exception : public std::exception
             return a + "/" + detail::escape(b);
         }) + ") ";
 #else
+        static_cast<void>(leaf_element);
         return "";
 #endif
     }

From 1e08af816dcdedc17e1272434b542ed535433623 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Tue, 23 Mar 2021 12:31:03 +0100
Subject: [PATCH 106/113] :rotating_light: fix warning

---
 test/src/unit-diagnostics.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/test/src/unit-diagnostics.cpp b/test/src/unit-diagnostics.cpp
index 1cea374a74..21ced33b1f 100644
--- a/test/src/unit-diagnostics.cpp
+++ b/test/src/unit-diagnostics.cpp
@@ -93,7 +93,8 @@ TEST_CASE("Better diagnostics")
 
     SECTION("Parse error")
     {
-        CHECK_THROWS_WITH_AS(json::parse(""), "[json.exception.parse_error.101] parse error at line 1, column 1: syntax error while parsing value - unexpected end of input; expected '[', '{', or a literal", json::parse_error);
+        json _;
+        CHECK_THROWS_WITH_AS(_ = json::parse(""), "[json.exception.parse_error.101] parse error at line 1, column 1: syntax error while parsing value - unexpected end of input; expected '[', '{', or a literal", json::parse_error);
     }
 
     SECTION("Regression test for https://github.com/nlohmann/json/pull/2562#pullrequestreview-574858448")
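Patches 105 and 106 both silence warnings rather than change behaviour: static_cast<void>(leaf_element) marks a value as deliberately unused in the non-diagnostics branch, and the test now binds the result of json::parse to a dummy variable so a nodiscard-style return value is not silently dropped inside the assertion macro. A compact sketch of both idioms, using a hypothetical parse_sketch stand-in rather than the library's json::parse:

#include <stdexcept>
#include <string>

// Hypothetical stand-in for a parser whose result must not be ignored.
[[nodiscard]] int parse_sketch(const std::string& text)
{
    if (text.empty())
    {
        throw std::invalid_argument("unexpected end of input");
    }
    return 42;
}

void demo(bool check_parents)
{
    // Idiom 1: the parameter is only read in some preprocessor branches;
    // casting it to void suppresses -Wunused-parameter without any codegen.
    static_cast<void>(check_parents);

    // Idiom 2: a [[nodiscard]] result evaluated inside an exception-checking
    // macro still counts as discarded, so bind it to a dummy first -- the
    // same trick as "json _;" in the test above.
    int _ = 0;
    try
    {
        _ = parse_sketch("");
    }
    catch (const std::invalid_argument&)
    {
        // the throw is the expected outcome, as in CHECK_THROWS_WITH_AS
    }
    static_cast<void>(_);
}

int main()
{
    demo(true);
    return 0;
}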
From fc38454a224c778f5f8b0b82b62e25359649745b Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Tue, 23 Mar 2021 13:10:13 +0100
Subject: [PATCH 107/113] :rotating_light: fix warning

---
 include/nlohmann/json.hpp        | 10 +++++++---
 single_include/nlohmann/json.hpp | 10 +++++++---
 2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp
index 2fc49dde30..8dc9a348b6 100644
--- a/include/nlohmann/json.hpp
+++ b/include/nlohmann/json.hpp
@@ -1247,10 +1247,14 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
         JSON_ASSERT(m_type != value_t::binary || m_value.binary != nullptr);
 
 #if JSON_DIAGNOSTICS
-        JSON_ASSERT(!check_parents || !is_structured() || std::all_of(begin(), end(), [this](const basic_json & j)
+        try
         {
-            return j.m_parent == this;
-        }));
+            JSON_ASSERT(!check_parents || !is_structured() || std::all_of(begin(), end(), [this](const basic_json & j)
+            {
+                return j.m_parent == this;
+            }));
+        }
+        catch (..) {}
 #else
         static_cast<void>(check_parents);
 #endif
diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp
index 54880efd6b..8407110b50 100644
--- a/single_include/nlohmann/json.hpp
+++ b/single_include/nlohmann/json.hpp
@@ -18060,10 +18060,14 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
         JSON_ASSERT(m_type != value_t::binary || m_value.binary != nullptr);
 
 #if JSON_DIAGNOSTICS
-        JSON_ASSERT(!check_parents || !is_structured() || std::all_of(begin(), end(), [this](const basic_json & j)
+        try
        {
-            return j.m_parent == this;
-        }));
+            JSON_ASSERT(!check_parents || !is_structured() || std::all_of(begin(), end(), [this](const basic_json & j)
+            {
+                return j.m_parent == this;
+            }));
+        }
+        catch (..) {}
 #else
         static_cast<void>(check_parents);
 #endif

From 5306eef15095731e2eda8337038c8231c5a6c3a6 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Tue, 23 Mar 2021 13:15:06 +0100
Subject: [PATCH 108/113] :rotating_light: fix warning

---
 include/nlohmann/json.hpp        | 2 +-
 single_include/nlohmann/json.hpp | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp
index 8dc9a348b6..8b133fa7a2 100644
--- a/include/nlohmann/json.hpp
+++ b/include/nlohmann/json.hpp
@@ -1254,7 +1254,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
                 return j.m_parent == this;
             }));
         }
-        catch (..) {}
+        catch (...) {}
 #else
         static_cast<void>(check_parents);
 #endif
diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp
index 8407110b50..7301d5f04e 100644
--- a/single_include/nlohmann/json.hpp
+++ b/single_include/nlohmann/json.hpp
@@ -18067,7 +18067,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
                 return j.m_parent == this;
             }));
         }
-        catch (..) {}
+        catch (...) {}
 #else
         static_cast<void>(check_parents);
 #endif

From 4428121ec78f048f1bbdf62b7928cdd4fb298d5c Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Tue, 23 Mar 2021 13:31:35 +0100
Subject: [PATCH 109/113] :rotating_light: fix warning

---
 include/nlohmann/json.hpp        | 13 +++++--------
 single_include/nlohmann/json.hpp | 13 +++++--------
 2 files changed, 10 insertions(+), 16 deletions(-)

diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp
index 8b133fa7a2..a2da4b4d9d 100644
--- a/include/nlohmann/json.hpp
+++ b/include/nlohmann/json.hpp
@@ -1239,7 +1239,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
     during destruction of objects when the invariant does not need to hold.
     */
-    void assert_invariant(bool check_parents = true) const noexcept
+    void assert_invariant(bool check_parents = true) const noexcept try
     {
         JSON_ASSERT(m_type != value_t::object || m_value.object != nullptr);
         JSON_ASSERT(m_type != value_t::array || m_value.array != nullptr);
@@ -1247,18 +1247,15 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
         JSON_ASSERT(m_type != value_t::binary || m_value.binary != nullptr);
 
 #if JSON_DIAGNOSTICS
-        try
+        JSON_ASSERT(!check_parents || !is_structured() || std::all_of(begin(), end(), [this](const basic_json & j)
         {
-            JSON_ASSERT(!check_parents || !is_structured() || std::all_of(begin(), end(), [this](const basic_json & j)
-            {
-                return j.m_parent == this;
-            }));
-        }
-        catch (...) {}
+            return j.m_parent == this;
+        }));
 #else
         static_cast<void>(check_parents);
 #endif
     }
+    catch (...) {}
 
     void set_parents()
     {
diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp
index 7301d5f04e..5724dbb2bc 100644
--- a/single_include/nlohmann/json.hpp
+++ b/single_include/nlohmann/json.hpp
@@ -18052,7 +18052,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
     during destruction of objects when the invariant does not need to hold.
     */
-    void assert_invariant(bool check_parents = true) const noexcept
+    void assert_invariant(bool check_parents = true) const noexcept try
     {
         JSON_ASSERT(m_type != value_t::object || m_value.object != nullptr);
         JSON_ASSERT(m_type != value_t::array || m_value.array != nullptr);
@@ -18060,18 +18060,15 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
         JSON_ASSERT(m_type != value_t::binary || m_value.binary != nullptr);
 
 #if JSON_DIAGNOSTICS
-        try
+        JSON_ASSERT(!check_parents || !is_structured() || std::all_of(begin(), end(), [this](const basic_json & j)
         {
-            JSON_ASSERT(!check_parents || !is_structured() || std::all_of(begin(), end(), [this](const basic_json & j)
-            {
-                return j.m_parent == this;
-            }));
-        }
-        catch (...) {}
+            return j.m_parent == this;
+        }));
 #else
         static_cast<void>(check_parents);
 #endif
     }
+    catch (...) {}
 
     void set_parents()
     {

From bc23505c54a1017282af18e12149d18b4789214b Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Tue, 23 Mar 2021 13:39:31 +0100
Subject: [PATCH 110/113] :rotating_light: fix warning

---
 include/nlohmann/json.hpp        | 1 +
 single_include/nlohmann/json.hpp | 1 +
 2 files changed, 2 insertions(+)

diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp
index a2da4b4d9d..cf3908949d 100644
--- a/include/nlohmann/json.hpp
+++ b/include/nlohmann/json.hpp
@@ -1247,6 +1247,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
         JSON_ASSERT(m_type != value_t::binary || m_value.binary != nullptr);
 
 #if JSON_DIAGNOSTICS
+        // cppcheck-suppress assertWithSideEffect
         JSON_ASSERT(!check_parents || !is_structured() || std::all_of(begin(), end(), [this](const basic_json & j)
         {
             return j.m_parent == this;
diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp
index 5724dbb2bc..def56bb87c 100644
--- a/single_include/nlohmann/json.hpp
+++ b/single_include/nlohmann/json.hpp
@@ -18060,6 +18060,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
         JSON_ASSERT(m_type != value_t::binary || m_value.binary != nullptr);
 
 #if JSON_DIAGNOSTICS
+        // cppcheck-suppress assertWithSideEffect
        JSON_ASSERT(!check_parents || !is_structured() || std::all_of(begin(), end(), [this](const basic_json & j)
         {
             return j.m_parent == this;
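Patch 109 above turns the inner try/catch into a function-try-block: the try keyword sits between the noexcept specifier and the opening brace, so a single handler covers the entire body, including the begin()/end() iteration guarded by #if JSON_DIAGNOSTICS. Falling off the end of the handler in a void function simply returns, so a throwing check can never escape the noexcept function and reach std::terminate. A minimal standalone sketch of the construct, with illustrative names:

#include <iostream>
#include <stdexcept>
#include <vector>

struct node
{
    std::vector<int> children;

    // Function-try-block: 'try' precedes the function body, and the catch
    // clause after the closing brace covers everything inside it. Swallowing
    // the exception keeps this noexcept function from calling std::terminate.
    void check_invariant() const noexcept try
    {
        for (const int c : children)
        {
            if (c < 0)
            {
                throw std::logic_error("negative child"); // caught below
            }
        }
    }
    catch (...) {} // handler sits outside the braces of the body
};

int main()
{
    node n{{1, -2, 3}};
    n.check_invariant(); // the logic_error is swallowed, no terminate
    std::cout << "still alive\n";
    return 0;
}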
From 63c52b1201f753271b95337a1e03cc9dcbf14a7e Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Tue, 23 Mar 2021 13:56:46 +0100
Subject: [PATCH 111/113] :rotating_light: fix warning

---
 include/nlohmann/json.hpp        | 15 +++++++++------
 single_include/nlohmann/json.hpp | 15 +++++++++------
 2 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp
index cf3908949d..147fb36eaf 100644
--- a/include/nlohmann/json.hpp
+++ b/include/nlohmann/json.hpp
@@ -1239,7 +1239,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
     during destruction of objects when the invariant does not need to hold.
     */
-    void assert_invariant(bool check_parents = true) const noexcept try
+    void assert_invariant(bool check_parents = true) const noexcept
     {
         JSON_ASSERT(m_type != value_t::object || m_value.object != nullptr);
         JSON_ASSERT(m_type != value_t::array || m_value.array != nullptr);
@@ -1247,16 +1247,19 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
         JSON_ASSERT(m_type != value_t::binary || m_value.binary != nullptr);
 
 #if JSON_DIAGNOSTICS
-        // cppcheck-suppress assertWithSideEffect
-        JSON_ASSERT(!check_parents || !is_structured() || std::all_of(begin(), end(), [this](const basic_json & j)
+        JSON_TRY
         {
-            return j.m_parent == this;
-        }));
+            // cppcheck-suppress assertWithSideEffect
+            JSON_ASSERT(!check_parents || !is_structured() || std::all_of(begin(), end(), [this](const basic_json & j)
+            {
+                return j.m_parent == this;
+            }));
+        }
+        JSON_CATCH(...) {}
 #else
         static_cast<void>(check_parents);
 #endif
     }
-    catch (...) {}
 
     void set_parents()
     {
diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp
index def56bb87c..3760063b8b 100644
--- a/single_include/nlohmann/json.hpp
+++ b/single_include/nlohmann/json.hpp
@@ -18052,7 +18052,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
     during destruction of objects when the invariant does not need to hold.
     */
-    void assert_invariant(bool check_parents = true) const noexcept try
+    void assert_invariant(bool check_parents = true) const noexcept
     {
         JSON_ASSERT(m_type != value_t::object || m_value.object != nullptr);
         JSON_ASSERT(m_type != value_t::array || m_value.array != nullptr);
@@ -18060,16 +18060,19 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
         JSON_ASSERT(m_type != value_t::binary || m_value.binary != nullptr);
 
 #if JSON_DIAGNOSTICS
-        // cppcheck-suppress assertWithSideEffect
-        JSON_ASSERT(!check_parents || !is_structured() || std::all_of(begin(), end(), [this](const basic_json & j)
+        JSON_TRY
         {
-            return j.m_parent == this;
-        }));
+            // cppcheck-suppress assertWithSideEffect
+            JSON_ASSERT(!check_parents || !is_structured() || std::all_of(begin(), end(), [this](const basic_json & j)
+            {
+                return j.m_parent == this;
+            }));
+        }
+        JSON_CATCH(...) {}
 #else
         static_cast<void>(check_parents);
 #endif
     }
-    catch (...) {}
 
     void set_parents()
     {

From f7ada0a8e1a3797c608771267e67204ca4a1221b Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Tue, 23 Mar 2021 18:15:52 +0100
Subject: [PATCH 112/113] :rotating_light: fix warning

---
 include/nlohmann/json.hpp        | 2 +-
 single_include/nlohmann/json.hpp | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/nlohmann/json.hpp b/include/nlohmann/json.hpp
index 147fb36eaf..3c9fa9199a 100644
--- a/include/nlohmann/json.hpp
+++ b/include/nlohmann/json.hpp
@@ -1255,7 +1255,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
                 return j.m_parent == this;
             }));
         }
-        JSON_CATCH(...) {}
+        JSON_CATCH(...) {} // LCOV_EXCL_LINE
 #else
         static_cast<void>(check_parents);
 #endif
diff --git a/single_include/nlohmann/json.hpp b/single_include/nlohmann/json.hpp
index 3760063b8b..282f33291f 100644
--- a/single_include/nlohmann/json.hpp
+++ b/single_include/nlohmann/json.hpp
@@ -18068,7 +18068,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
                 return j.m_parent == this;
             }));
         }
-        JSON_CATCH(...) {}
+        JSON_CATCH(...) {} // LCOV_EXCL_LINE
 #else
         static_cast<void>(check_parents);
 #endif

From 3d0812a38220ec3fefe0a74a5e4b03e1b3973e43 Mon Sep 17 00:00:00 2001
From: Niels Lohmann
Date: Tue, 23 Mar 2021 18:20:56 +0100
Subject: [PATCH 113/113] :fire: clean Travis

---
 .travis.yml | 36 +-----------------------------------
 1 file changed, 1 insertion(+), 35 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index eaf45169b0..cfa68c5233 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -17,38 +17,6 @@ group: edge
 matrix:
   include:
 
-    # check amalgamation
-    - os: linux
-      compiler: gcc
-      env:
-        - COMPILER=g++-4.9
-        - SPECIAL=amalgamation
-        - MULTIPLE_HEADERS=ON
-      addons:
-        apt:
-          sources: ['ubuntu-toolchain-r-test']
-          packages: ['g++-4.9', 'astyle', 'ninja-build']
-      after_success:
-        - make check-amalgamation
-
-    # Coveralls (http://gronlier.fr/blog/2015/01/adding-code-coverage-to-your-c-project/)
-
-    - os: linux
-      compiler: gcc
-      dist: bionic
-      addons:
-        apt:
-          sources: ['ubuntu-toolchain-r-test']
-          packages: ['g++-7', 'ninja-build']
-      before_script:
-        - pip install --user cpp-coveralls
-      after_success:
-        - coveralls --build-root test --include include/nlohmann --gcov 'gcov-7' --gcov-options '\-lp'
-      env:
-        - COMPILER=g++-7
-        - CMAKE_OPTIONS=-DJSON_Coverage=ON
-        - MULTIPLE_HEADERS=ON
-
     # Coverity (only for branch coverity_scan)
 
     - os: linux
@@ -252,15 +220,13 @@ script:
   # make sure CXX is correctly set
   - if [[ "${COMPILER}" != "" ]]; then export CXX=${COMPILER}; fi
 
-  # by default, use the single-header version
-  - if [[ "${MULTIPLE_HEADERS}" == "" ]]; then export MULTIPLE_HEADERS=OFF; fi
-
   # append CXX_STANDARD to CMAKE_OPTIONS if required
   - CMAKE_OPTIONS+=${CXX_STANDARD:+ -DCMAKE_CXX_STANDARD=$CXX_STANDARD -DCMAKE_CXX_STANDARD_REQUIRED=ON}
 
   # compile and execute unit tests
   - mkdir -p build && cd build
-  - cmake .. ${CMAKE_OPTIONS} -DJSON_MultipleHeaders=${MULTIPLE_HEADERS} -DJSON_BuildTests=On -GNinja && cmake --build . --config Release
+  - cmake .. ${CMAKE_OPTIONS} -DJSON_BuildTests=On -GNinja && cmake --build . --config Release
  - ctest -C Release --timeout 2700 -V -j
   - cd ..
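Patches 111 and 112 route the handler through JSON_TRY/JSON_CATCH so assert_invariant also compiles when exceptions are switched off, as exercised by the ci_test_noexceptions target earlier in this series. The sketch below shows the usual shape of such macros; it is modelled on the library's macro_scope.hpp but reproduced from memory, so treat the exact conditions and definitions as assumptions rather than the library's verbatim source:

#include <cstdlib>

// With exceptions available, the macros expand to real try/catch keywords.
// Without them, "JSON_TRY { body } JSON_CATCH(...) {}" degrades to
// "if (true) { body } if (false) {}": the body still runs, the handler is
// compiled out, and no exception keyword reaches a -fno-exceptions build.
#if defined(__cpp_exceptions) && !defined(JSON_NOEXCEPTION)
    #define JSON_THROW(exception) throw exception
    #define JSON_TRY try
    #define JSON_CATCH(exception) catch(exception)
#else
    #define JSON_THROW(exception) std::abort()
    #define JSON_TRY if (true)
    #define JSON_CATCH(exception) if (false)
#endif

int main()
{
    JSON_TRY
    {
        // body that may JSON_THROW(std::runtime_error("...")) in a real build
    }
    JSON_CATCH(...) {} // LCOV_EXCL_LINE: never executed, so excluded from coverage
    return 0;
}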